"""Conway's Game of Life on a square canvas, rendered with matplotlib."""
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Apply the rules of the game to every point and return the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
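

# A minimal, render-free sanity check of one generation, assuming the
# functions above: a vertical "blinker" of three live cells should flip
# into a horizontal row after a single run() step.
def _demo_blinker_step() -> None:
    blinker = create_canvas(5)
    for r in (1, 2, 3):
        blinker[r][2] = True  # vertical bar in the middle column
    stepped = run(blinker)
    assert [stepped[2][c] for c in (1, 2, 3)] == [True, True, True]
    assert stepped[1][2] is False and stepped[3][2] is False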
"""Bisection method: find a root of f(x) = 10 - x * x inside a bracket [a, b]."""


def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano's theorem: the equation must change sign over [a, b]
    # for the bracket to be guaranteed to contain a root.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
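

# A short sketch of the convergence argument for the bisection() above:
# the bracket halves every iteration, so shrinking an initial bracket
# [a, b] below a tolerance takes about log2((b - a) / tol) steps, e.g.
# ceil(log2(7 / 0.01)) = 10 iterations for bisection(-2, 5).
from math import ceil, log2


def iterations_needed(a: float, b: float, tol: float = 0.01) -> int:
    return ceil(log2((b - a) / tol))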
"""Directed and undirected (weighted) graphs backed by adjacency lists."""
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1 the
    # count will be random from 10 to 10010
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin


class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
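

# A small usage sketch, assuming the classes above: build a three-node
# directed cycle, then query traversals, degrees and cycle detection.
if __name__ == "__main__":
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)  # back edge closing the cycle 0 -> 1 -> 2 -> 0
    print(dg.dfs())  # [0, 1, 2]
    print(dg.bfs())  # [0, 1, 2]
    print(dg.in_degree(0))  # 1
    print(dg.has_cycle())  # True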
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
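

# A minimal usage sketch for the processor exercised above; the image path
# is hypothetical, and OCR needs Tesseract plus `pytesseract` installed:
if __name__ == "__main__":
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor

    processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
    document = Image.open("document.png").convert("RGB")  # hypothetical file
    encoding = processor(document, return_tensors="pt")
    print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
    print(encoding.words, encoding.boxes)  # OCR words and normalized boxes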
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
    'processing_git': ['GitProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_git'] = [
        'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GitForCausalLM',
        'GitModel',
        'GitPreTrainedModel',
        'GitVisionModel',
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
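

# The module above defers importing the heavy torch code until one of its
# attributes is first touched. A stripped-down sketch of the same idea,
# assuming only the standard library (transformers' real _LazyModule also
# handles submodules, __dir__, pickling, and more):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ only fires once
        return value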
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}


class MarianConfig(PretrainedConfig):
    model_type = "marian"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=58101,
        decoder_vocab_size=None,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58100,
        scale_embedding=False,
        pad_token_id=58100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_encoder_and_decoder(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
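

# A short sketch of how this ONNX config is typically driven, assuming
# network access to the Hugging Face Hub for the Marian tokenizer:
if __name__ == "__main__":
    from transformers import AutoTokenizer

    onnx_config = MarianOnnxConfig(MarianConfig(), task="seq2seq-lm")
    tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
    dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    print(sorted(dummy_inputs.keys()))  # input_ids, attention_mask, decoder_*, ...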
import importlib.metadata
import warnings
from copy import deepcopy

from packaging import version

from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters


logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Set a (possibly bitsandbytes-quantized) parameter or buffer of `module` on `device`,
    optionally replacing its data with `value`.
    """
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """
    Private recursion helper for `replace_with_bnb_linear`; returns the converted
    model and whether at least one linear layer was replaced.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Replace every `nn.Linear` (and gpt2-style `Conv1D`) module with a bitsandbytes
    8-bit or 4-bit linear layer, except the modules listed in `modules_to_not_convert`.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use "
        "`set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    """
    Find the keys of modules that should be kept in full precision,
    typically tied weights and the output head.
    """
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside the `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
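

# A small usage sketch, assuming a machine with `bitsandbytes`, `accelerate`
# and CUDA available; the quantization config is the standard transformers
# `BitsAndBytesConfig`, and quantized weights still need to be loaded afterwards:
if __name__ == "__main__":
    from transformers import BitsAndBytesConfig

    toy = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16))
    bnb_config = BitsAndBytesConfig(load_in_8bit=True)
    toy = replace_with_bnb_linear(toy, quantization_config=bnb_config)
    print(toy)  # Linear layers are now bnb.nn.Linear8bitLt modules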
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by `level` (roughly -255 .. 255)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # transformation applied to every pixel value
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
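

# A quick numeric check of the contrast curve used above: at level=170 the
# factor is (259 * 425) / (255 * 89) ≈ 4.85, so mid-gray (128) is a fixed
# point while values away from it get pushed toward 0 or 255.
def _contrast_factor(level: int) -> float:
    return (259 * (level + 255)) / (255 * (259 - level))


assert round(_contrast_factor(170), 2) == 4.85
assert int(128 + _contrast_factor(170) * (150 - 128)) == 234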
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
_UpperCamelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def _lowercase ( lowercase__ ):
__lowerCAmelCase : List[str] = {}
with open(lowercase__ , '''r''' ) as file:
for line_number, line in enumerate(lowercase__ ):
__lowerCAmelCase : Any = line.strip()
if line:
__lowerCAmelCase : Dict = line.split()
__lowerCAmelCase : str = line_number
__lowerCAmelCase : List[str] = words[0]
__lowerCAmelCase : Any = value
return result
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
for attribute in key.split('''.''' ):
__lowerCAmelCase : List[Any] = getattr(lowercase__ , lowercase__ )
__lowerCAmelCase : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase__ ):
__lowerCAmelCase : Tuple = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__lowerCAmelCase : List[Any] = '''param'''
if weight_type is not None and weight_type != "param":
__lowerCAmelCase : str = getattr(lowercase__ , lowercase__ ).shape
elif weight_type is not None and weight_type == "param":
__lowerCAmelCase : Dict = hf_pointer
for attribute in hf_param_name.split('''.''' ):
__lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ )
__lowerCAmelCase : str = shape_pointer.shape
# let's reduce dimension
__lowerCAmelCase : Any = value[0]
else:
__lowerCAmelCase : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[str] = value
elif weight_type == "weight_v":
__lowerCAmelCase : int = value
elif weight_type == "bias":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
__lowerCAmelCase : Dict = getattr(lowercase__ , lowercase__ )
__lowerCAmelCase : Tuple = value
else:
__lowerCAmelCase : Any = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__lowerCAmelCase : Any = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowercase__ ):
__lowerCAmelCase : str = PARAM_MAPPING[full_name.split('''.''' )[-1]]
__lowerCAmelCase : int = '''param'''
if weight_type is not None and weight_type != "param":
__lowerCAmelCase : Tuple = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__lowerCAmelCase : List[str] = '''.'''.join([key, hf_param_name] )
else:
__lowerCAmelCase : Optional[int] = key
__lowerCAmelCase : Union[str, Any] = value if '''lm_head''' in full_key else value[0]
_UpperCamelCase = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def _lowercase ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ):
__lowerCAmelCase : Any = False
for key, mapped_key in MAPPING.items():
__lowerCAmelCase : Tuple = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__lowerCAmelCase : Optional[Any] = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(lowercase__ )[0].split('''.''' )[-2]
__lowerCAmelCase : Dict = mapped_key.replace('''*''' , lowercase__ )
if "weight_g" in name:
__lowerCAmelCase : List[Any] = '''weight_g'''
elif "weight_v" in name:
__lowerCAmelCase : List[Any] = '''weight_v'''
elif "bias" in name:
__lowerCAmelCase : Any = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : int = '''weight'''
else:
__lowerCAmelCase : Any = None
if hf_dict is not None:
rename_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
else:
set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return is_used
return is_used
def _lowercase ( lowercase__ , lowercase__ , lowercase__ ):
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : Optional[Any] = fairseq_model.state_dict()
__lowerCAmelCase : Tuple = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Any = False
if "conv_layers" in name:
load_conv_layer(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == '''group''' , )
__lowerCAmelCase : int = True
else:
__lowerCAmelCase : Dict = load_wavaveca_layer(lowercase__ , lowercase__ , lowercase__ )
if not is_used:
unused_weights.append(lowercase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def _lowercase ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
__lowerCAmelCase : Any = full_name.split('''conv_layers.''' )[-1]
__lowerCAmelCase : List[str] = name.split('''.''' )
__lowerCAmelCase : Any = int(items[0] )
__lowerCAmelCase : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCAmelCase : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCAmelCase : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCAmelCase : List[str] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCAmelCase : Optional[int] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowercase__ )
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.id2label = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task)
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
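# Illustrative invocation of this conversion script (a sketch; the checkpoint and
# output paths below are hypothetical placeholders, not files shipped with it):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-converted \
#       --not_finetuned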
| 275 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_wavlm'] = [
'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'WavLMForAudioFrameClassification',
'WavLMForCTC',
'WavLMForSequenceClassification',
'WavLMForXVector',
'WavLMModel',
'WavLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
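# Consumption sketch for the lazy module above (assumes torch is installed so the
# model symbols resolve on first attribute access):
#
#   from transformers import WavLMConfig, WavLMModel
#   model = WavLMModel(WavLMConfig())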
| 355 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
__A = {'''allegro/herbert-base-cased''': 5_14}
__A = {}
class HerbertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ) -> None:
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
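# Special-token layout produced by the methods above (token ids 10/11/20/21 are
# arbitrary placeholders):
#
#   single sequence: [CLS] 10 11 [SEP]              token_type_ids: 0 0 0 0
#   sequence pair:   [CLS] 10 11 [SEP] 20 21 [SEP]  token_type_ids: 0 0 0 0 1 1 1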
| 278 | 0 |
import unittest

import numpy as np


def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            'Expected the same number of rows for A and B. '
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            'Expected the same number of columns for B and C. '
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )
    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError):
            # arguments passed in the wrong order trip the dimension checks
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
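# The identity exercised by test_schur_complement above: for the block matrix
# X = [[A, B], [B.T, C]], det(X) = det(A) * det(S), where S is the Schur complement.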
| 216 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
SPIECE_UNDERLINE = """▁"""
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = 'left'
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
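# XLNet-style layout produced above -- [CLS] goes at the END and gets segment id 2:
#
#   single:  ids_0 [SEP] [CLS]              token_type_ids: 0 ... 0 2
#   pair:    ids_0 [SEP] ids_1 [SEP] [CLS]  token_type_ids: 0s for ids_0+sep, 1s for ids_1+sep, then 2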
| 185 | 0 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
B'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
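# Usage sketch (interactive): get_character() returns printable keys unchanged and
# encodes arrow keys as chr(keycode + ARROW_KEY_FLAG), so a caller can test e.g.:
#
#   char = get_character()
#   if isinstance(char, str) and ord(char) == KEYMAP["up"]:
#       ...  # handle arrow-up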
| 341 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'spiece.model'}
__A = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
__A = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
__A = 0
__A = 1
__A = 2
__A = 3
__A = 4
class lowerCamelCase__ ( __magic_name__ ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = '''left'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="<s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<sep>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<cls>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase=["<eop>", "<eod>"] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
_lowerCAmelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_lowerCAmelCase =3
_lowerCAmelCase =do_lower_case
_lowerCAmelCase =remove_space
_lowerCAmelCase =keep_accents
_lowerCAmelCase =vocab_file
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> str:
return len(self.sp_model )
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
_lowerCAmelCase =self.__dict__.copy()
_lowerCAmelCase =None
return state
def __setstate__( self , __UpperCAmelCase ) -> Tuple:
_lowerCAmelCase =d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase ={}
_lowerCAmelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
if self.remove_space:
_lowerCAmelCase =""" """.join(inputs.strip().split() )
else:
_lowerCAmelCase =inputs
_lowerCAmelCase =outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_lowerCAmelCase =unicodedata.normalize("""NFKD""" , __UpperCAmelCase )
_lowerCAmelCase ="""""".join([c for c in outputs if not unicodedata.combining(__UpperCAmelCase )] )
if self.do_lower_case:
_lowerCAmelCase =outputs.lower()
return outputs
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
_lowerCAmelCase =self.preprocess_text(__UpperCAmelCase )
_lowerCAmelCase =self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
_lowerCAmelCase =[]
for piece in pieces:
if len(__UpperCAmelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_lowerCAmelCase =self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCAmelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_lowerCAmelCase =cur_pieces[1:]
else:
_lowerCAmelCase =cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCAmelCase )
else:
new_pieces.append(__UpperCAmelCase )
return new_pieces
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> Optional[Any]:
return self.sp_model.PieceToId(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.IdToPiece(__UpperCAmelCase )
def _lowerCAmelCase ( self , __UpperCAmelCase ) -> str:
_lowerCAmelCase ="""""".join(__UpperCAmelCase ).replace(__UpperCAmelCase , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ) -> str:
_lowerCAmelCase =kwargs.pop("""use_source_tokenizer""" , __UpperCAmelCase )
_lowerCAmelCase =self.convert_ids_to_tokens(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_lowerCAmelCase =[]
_lowerCAmelCase =[]
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
_lowerCAmelCase =[]
sub_texts.append(__UpperCAmelCase )
else:
current_sub_text.append(__UpperCAmelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(__UpperCAmelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_lowerCAmelCase ="""""".join(__UpperCAmelCase )
_lowerCAmelCase =(
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_lowerCAmelCase =self.clean_up_tokenization(__UpperCAmelCase )
return clean_text
else:
return text
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1, 1]
return ([0] * len(__UpperCAmelCase )) + [1, 1]
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_lowerCAmelCase =[self.sep_token_id]
_lowerCAmelCase =[2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowerCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase =os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , """wb""" ) as fi:
_lowerCAmelCase =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
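# Behaviour sketch of the preprocessing above: with remove_space=True, runs of
# whitespace collapse and `` / '' quotes normalize to ", e.g.
#   " Hello  ``world'' "  ->  'Hello "world"'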
| 341 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCAmelCase__ = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
MODEL_MODES = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class lowercase_ ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : str , __UpperCAmelCase : argparse.Namespace , __UpperCAmelCase : str=None , __UpperCAmelCase : List[str]="base" , __UpperCAmelCase : str=None , __UpperCAmelCase : int=None , __UpperCAmelCase : Optional[Any]=None , **__UpperCAmelCase : Optional[int] , ) ->Tuple:
"""simple docstring"""
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__UpperCAmelCase )
a = 0
a = Path(self.hparams.output_dir )
a = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
a = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=__UpperCAmelCase , **__UpperCAmelCase , )
else:
a = config
a = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , __UpperCAmelCase , __UpperCAmelCase ):
assert hasattr(self.config , __UpperCAmelCase ), F"""model config doesn't have a `{p}` attribute"""
setattr(self.config , __UpperCAmelCase , getattr(self.hparams , __UpperCAmelCase ) )
if tokenizer is None:
a = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__UpperCAmelCase , )
else:
a = tokenizer
a = MODEL_MODES[mode]
if model is None:
a = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__UpperCAmelCase , )
else:
a = model
def __lowerCAmelCase ( self : Any , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : str ) ->str:
"""simple docstring"""
a = self.model_type.from_pretrained(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
a = arg_to_scheduler[self.hparams.lr_scheduler]
a = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
a = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
a = self.model
a = ['''bias''', '''LayerNorm.weight''']
a = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
a = Adafactor(
__UpperCAmelCase , lr=self.hparams.learning_rate , scale_parameter=__UpperCAmelCase , relative_step=__UpperCAmelCase )
else:
a = AdamW(
__UpperCAmelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
a = optimizer
a = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[Any] ) ->Dict:
"""simple docstring"""
return self.validation_step(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] ) ->Any:
"""simple docstring"""
return self.validation_end(__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
a = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Dict ) ->Optional[int]:
"""simple docstring"""
if stage == "test":
a = len(self.test_dataloader().dataset )
else:
a = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=__UpperCAmelCase )
a = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : bool = False ) ->str:
"""simple docstring"""
raise NotImplementedError('''You must implement this for your task''' )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
return self.train_loader
def __lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Tuple:
"""simple docstring"""
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=__UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : Optional[int] ) ->Optional[Any]:
"""simple docstring"""
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
__UpperCAmelCase , list(filter(__UpperCAmelCase , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : Dict[str, Any] ) ->None:
"""simple docstring"""
a = self.output_dir.joinpath('''best_tfmr''' )
a = self.step_count
self.model.save_pretrained(__UpperCAmelCase )
self.tokenizer.save_pretrained(__UpperCAmelCase )
@staticmethod
def __lowerCAmelCase ( __UpperCAmelCase : Dict , __UpperCAmelCase : int ) ->int:
"""simple docstring"""
parser.add_argument(
'''--model_name_or_path''' , default=__UpperCAmelCase , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=__UpperCAmelCase , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=__UpperCAmelCase , type=__UpperCAmelCase , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(__UpperCAmelCase ).parent / '''test_run''' / '''cache''' ) , type=__UpperCAmelCase , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=__UpperCAmelCase , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=__UpperCAmelCase , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=__UpperCAmelCase , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=__UpperCAmelCase , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=__UpperCAmelCase , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=__UpperCAmelCase , metavar=__UpperCAmelCase , type=__UpperCAmelCase , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=__UpperCAmelCase , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=__UpperCAmelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=__UpperCAmelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=__UpperCAmelCase , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=__UpperCAmelCase )
parser.add_argument('''--train_batch_size''' , default=32 , type=__UpperCAmelCase )
parser.add_argument('''--eval_batch_size''' , default=32 , type=__UpperCAmelCase )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class lowercase_ ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] ) ->int:
"""simple docstring"""
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class lowercase_ ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__UpperCAmelCase )
class lowercase_ ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ) ->int:
"""simple docstring"""
a = trainer.lr_schedulers[0]['''scheduler''']
a = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__UpperCAmelCase )
def __lowerCAmelCase ( self : Any , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule ) ->Union[str, Any]:
"""simple docstring"""
rank_zero_info('''***** Validation results *****''' )
a = trainer.callback_metrics
# Log results
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : int , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule ) ->Optional[int]:
"""simple docstring"""
rank_zero_info('''***** Test results *****''' )
a = trainer.callback_metrics
# Log and save results to file
a = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(__UpperCAmelCase , '''w''' ) as writer:
for key in sorted(__UpperCAmelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(__UpperCAmelCase , str(metrics[key] ) ) )
def _a ( a :Union[str, Any] , a :int ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'''--output_dir''' , default=str(Path(a ).parent / '''test_run''' / '''model_checkpoints''' ) , type=a , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=a , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=a )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=a , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=a , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=a , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(a ).parent / '''test_run''' / '''dummy-train-data''' ) , type=a , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def _a ( a :BaseTransformer , a :argparse.Namespace , a :Tuple=None , a :Any=True , a :List[str]=[] , a :List[Any]=None , a :Union[str, Any]=None , **a :Optional[Any] , ) -> List[str]:
pl.seed_everything(args.seed )
# init model
a = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=a )
# add custom checkpoints
if checkpoint_callback is None:
a = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(a )
if logging_callback is None:
a = LoggingCallback()
a = {}
if args.fpaa:
a = 16
if args.gpus > 1:
a = '''auto'''
a = '''ddp'''
a = args.accumulate_grad_batches
a = None
a = '''auto'''
a = pl.Trainer.from_argparse_args(
a , weights_summary=a , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=a , val_check_interval=1 , num_sanity_val_steps=2 , **a , )
if args.do_train:
trainer.fit(a )
else:
print('''RAG modeling tests with new set functions successfuly executed!''' )
return trainer
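# Subclassing sketch against the upstream names of the pieces defined above
# (BaseTransformer / add_generic_args / generic_train); MyTask and its methods are
# illustrative placeholders, not part of this file:
#
#   class MyTask(BaseTransformer):
#       def __init__(self, hparams):
#           super().__init__(hparams, mode="sequence-classification")
#       def training_step(self, batch, batch_nb): ...
#       def get_dataloader(self, type_path, batch_size, shuffle=False): ...
#
#   parser = argparse.ArgumentParser()
#   add_generic_args(parser, os.getcwd())
#   parser = MyTask.add_model_specific_args(parser, os.getcwd())
#   model = MyTask(parser.parse_args())
#   trainer = generic_train(model, parser.parse_args())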
| 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"""config""": [
"""EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
"""OnnxConfig""",
"""OnnxConfigWithPast""",
"""OnnxSeq2SeqConfigWithPast""",
"""PatchingSpec""",
],
"""convert""": ["""export""", """validate_model_outputs"""],
"""features""": ["""FeaturesManager"""],
"""utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
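# Typical consumption of these lazy exports (a sketch):
#
#   from transformers.onnx import FeaturesManager, export
#   # FeaturesManager maps a model to its OnnxConfig; export() then traces the model
#   # and writes the ONNX graph, and validate_model_outputs() compares the two.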
| 171 | 0 |
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # standard BFS over the residual graph; records the augmenting path via `parent`
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float('''Inf''')
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
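# For the CLRS example graph above this prints [(1, 3), (4, 3), (4, 5)] -- the
# min-cut edges, whose capacities 12 + 7 + 4 = 23 match the maximum flow.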
| 351 |
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 44 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_resnet'''] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 233 |
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 233 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(src_traced)} operations while'''
                F''' destination module has {len(dest_traced)}.''' )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(F'''Transfered from={src_m} to={dest_m}''')
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(F'''Converting {name}...''')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = F'''resnet{'-'.join(name.split('resnet'))}'''
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="""Add model""", use_temp_dir=True, )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="""Add image processor""", use_temp_dir=True, )
        print(F'''Pushed {checkpoint_name}''')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = """huggingface/label-files"""
    num_labels = num_labels
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}

    idalabel = idalabel
    labelaid = {v: k for k, v in idalabel.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=idalabel, label2id=labelaid)

    names_to_config = {
        """resnet18""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="""basic"""),
        """resnet26""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="""bottleneck"""),
        """resnet34""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="""basic"""),
        """resnet50""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="""bottleneck"""),
        """resnet101""": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="""bottleneck"""),
        """resnet152""": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type="""bottleneck"""),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
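# Illustrative invocation (a sketch; the script filename and output directory are
# placeholders):
#
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet50-converted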
| 81 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
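# The demo above prints [4, 10, 18] -- the start indices of "ABC" in the text.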
| 81 | 1 |
"""simple docstring"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os ,_PatchedModuleObj )
assert isinstance(_test_patching.os.path ,_PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path ,_PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os ,_PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path ,_PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path ,_PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
# check that everthing is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
# check that everthing is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
pass
def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
assert _test_patching.len is mock
assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching ,"os.path.join" ,__snake_case ):
with patch_submodule(_test_patching ,"os.rename" ,__snake_case ):
with patch_submodule(_test_patching ,"os.path.dirname" ,__snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching ,"os.rename" ,__snake_case ):
with patch_submodule(_test_patching ,"os.path.join" ,__snake_case ):
with patch_submodule(_test_patching ,"os.path.dirname" ,__snake_case ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 269 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 269 | 1 |
from __future__ import annotations
from typing import Generic, TypeVar
_snake_case = TypeVar("""T""")
class DisjointSetTreeNode ( Generic[T] ):
    def __init__( self :int , data :T ):
        '''simple docstring'''
        self.data = data
        self.parent = self
        self.rank = 0
class DisjointSetTree ( Generic[T] ):
def __init__( self :Union[str, Any] ):
'''simple docstring'''
        self.map = {}
    def make_set( self :int , data :T ):
        '''simple docstring'''
        # create a new singleton set containing only `data`
        self.map[data] = DisjointSetTreeNode(data )
    def find_set( self :List[str] , data :T ):
        '''simple docstring'''
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            # path compression: point directly at the root
            elem_ref.parent = self.find_set(elem_ref.parent.data )
        return elem_ref.parent
    def link( self :str , nodea :DisjointSetTreeNode[T] , nodeb :DisjointSetTreeNode[T] ):
        '''simple docstring'''
        # union by rank: attach the tree of smaller rank below the other
        if nodea.rank > nodeb.rank:
            nodeb.parent = nodea
        else:
            nodea.parent = nodeb
            if nodea.rank == nodeb.rank:
                nodeb.rank += 1
    def union( self :Dict , dataa :T , datab :T ):
        '''simple docstring'''
        self.link(self.find_set(dataa ) , self.find_set(datab ) )
class GraphUndirectedWeighted ( Generic[T] ):
def __init__( self :str ):
'''simple docstring'''
        self.connections = {}
    def add_node( self :str , node :T ):
        '''simple docstring'''
        if node not in self.connections:
            self.connections[node] = {}
    def add_edge( self :Optional[int] , nodea :T , nodeb :T , weight :int ):
        '''simple docstring'''
        self.add_node(nodea )
        self.add_node(nodeb )
        # store the undirected edge in both directions
        self.connections[nodea][nodeb] = weight
        self.connections[nodeb][nodea] = weight
    def kruskal( self :List[Any] ):
        '''simple docstring'''
        # collect each undirected edge exactly once, then sort by weight
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start) )
                    edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node )
        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections ) - 1:
            u , v , w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u )
            parent_v = disjoint_set.find_set(v )
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u , v , w )
                disjoint_set.union(u , v )
        return graph
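# a minimal usage sketch (node labels and weights are assumptions, not part of the original file):
# g = GraphUndirectedWeighted[int]()
# g.add_edge(1 , 2 , 1)
# g.add_edge(2 , 3 , 2)
# g.add_edge(1 , 3 , 10)
# mst = g.kruskal()  # keeps the edges (1, 2) and (2, 3), total weight 3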
| 201 |
def move_tower( height , from_pole , to_pole , with_pole ):
    if height >= 1:
        move_tower(height - 1 , from_pole , with_pole , to_pole )
        move_disk(from_pole , to_pole )
        move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk( from_pole , to_pole ):
    print("moving disk from" , from_pole , "to" , to_pole )
def main( ):
    height = int(input("Height of hanoi: " ).strip() )
    move_tower(height , "A" , "B" , "C" )
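# worked example: move_tower(2 , "A" , "B" , "C") prints
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B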
if __name__ == "__main__":
main()
| 201 | 1 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs( token :Union[str, Any] , num_runs :int=7 ) -> Any:
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs( token :Dict ) -> Union[str, Any]:
    # pick the most recent *completed* daily CI run
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts( artifact_names :Tuple , output_dir :Union[str, Any] , token :List[Any] ) -> int:
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def get_last_daily_ci_reports( artifact_names :str , output_dir :List[Any] , token :List[str] ) -> Any:
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , f'{artifact_name}.zip' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8' )
    return results
| 316 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments ( TrainingArguments ):
    sortish_sampler : bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate : bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length : Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    generation_num_beams : Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    generation_config : Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )
    def to_dict( self ):
        '''simple docstring'''
        d = super().to_dict()
        for k, v in d.items():
            # a GenerationConfig is not JSON-serializable, so store it as a plain dict
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
return d
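# a minimal usage sketch (the argument values are assumptions):
# args = Seq2SeqTrainingArguments(
#     output_dir="out" , predict_with_generate=True , generation_max_length=128 , generation_num_beams=4 )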
| 316 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel ( pl.LightningModule ):
'''simple docstring'''
    def __init__( self : int , model : int) ->Dict:
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels)
def SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
'''simple docstring'''
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ) -> Any:
    """simple docstring"""
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device('''cpu''' ) )
    lightning_model.load_state_dict(ckpt['''state_dict'''] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
_lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCamelCase : Tuple = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 360 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester :
'''simple docstring'''
    def __init__( self : str , parent : List[Any] , batch_size : Any=14 , seq_length : Any=7 , is_training : Dict=True , use_input_mask : int=True , use_token_type_ids : Tuple=False , use_labels : Optional[int]=True , vocab_size : Tuple=99 , hidden_size : Optional[Any]=32 , rotary_dim : Tuple=4 , num_hidden_layers : Any=4 , num_attention_heads : int=4 , intermediate_size : Optional[Any]=37 , hidden_act : Union[str, Any]="gelu" , hidden_dropout_prob : Optional[int]=0.1 , attention_probs_dropout_prob : int=0.1 , max_position_embeddings : Optional[Any]=512 , initializer_range : Union[str, Any]=0.02 , ) ->Any:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs( self : List[str]) ->List[str]:
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=False , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common( self : Any) ->Optional[int]:
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def check_use_cache_forward( self : Any , model_class_name : int , config : Tuple , input_ids : Union[str, Any] , attention_mask : List[Any]) ->Optional[int]:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''')
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''')
        outputs_cache_next = model(
            input_ids[:, -1:] , attention_mask=attention_mask , past_key_values=outputs_cache.past_key_values , position_ids=position_ids , )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
    def check_use_cache_forward_with_attn_mask( self : Tuple , model_class_name : Optional[Any] , config : Dict , input_ids : List[Any] , attention_mask : int) ->Any:
        '''simple docstring'''
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , )
        past_key_values = model.init_cache(input_ids.shape[0] , max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1] , attention_mask=attention_mask_cache , past_key_values=past_key_values , position_ids=position_ids , )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''')
        outputs_cache_next = model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=attention_mask_cache , position_ids=position_ids , )
        outputs = model(input_ids , attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
@require_flax
class FlaxGPTJModelTest ( FlaxModelTesterMixin , FlaxGenerationTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp( self : Optional[Any]) ->None:
        '''simple docstring'''
        self.model_tester = FlaxGPTJModelTester(self)
    def test_use_cache_forward( self : Optional[Any]) ->Any:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name , config , input_ids , attention_mask)
    def test_use_cache_forward_with_attn_mask( self : Optional[int]) ->Any:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            config , input_ids , attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name , config , input_ids , attention_mask)
@tooslow
    def test_batch_generation( self : Optional[Any]) ->Dict:
        '''simple docstring'''
        tokenizer = GPT2Tokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''')
        inputs = tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=True , truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''')
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences , skip_special_tokens=True)
        expected_string = [
            '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
            '''Hey, I\'m a little late to the party. I\'m going to''',
        ]
        self.assertListEqual(output_string , expected_string)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)
A__ = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ = getattr(UpperCAmelCase__ , UpperCAmelCase__)
A__ , A__ = pt_inputs['''input_ids'''].shape
A__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase__):
A__ = 0
A__ = 1
A__ = 0
A__ = 1
A__ = pt_model_class(UpperCAmelCase__).eval()
A__ = model_class(UpperCAmelCase__ , dtype=jnp.floataa)
A__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase__)
A__ = fx_state
with torch.no_grad():
A__ = pt_model(**UpperCAmelCase__).to_tuple()
A__ = fx_model(**UpperCAmelCase__).to_tuple()
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase__)
A__ = model_class.from_pretrained(UpperCAmelCase__ , from_pt=UpperCAmelCase__)
A__ = fx_model_loaded(**UpperCAmelCase__).to_tuple()
self.assertEqual(
len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output_loaded, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@is_pt_flax_cross_test
def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]:
'''simple docstring'''
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
A__ = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__)
A__ = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
A__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ = getattr(UpperCAmelCase__ , UpperCAmelCase__)
A__ = pt_model_class(UpperCAmelCase__).eval()
A__ = model_class(UpperCAmelCase__ , dtype=jnp.floataa)
A__ = load_flax_weights_in_pytorch_model(UpperCAmelCase__ , fx_model.params)
A__ , A__ = pt_inputs['''input_ids'''].shape
A__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,))
for batch_idx, start_index in enumerate(UpperCAmelCase__):
A__ = 0
A__ = 1
A__ = 0
A__ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
A__ = pt_model(**UpperCAmelCase__).to_tuple()
A__ = fx_model(**UpperCAmelCase__).to_tuple()
self.assertEqual(len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase__)
A__ = pt_model_class.from_pretrained(UpperCAmelCase__ , from_flax=UpperCAmelCase__)
with torch.no_grad():
A__ = pt_model_loaded(**UpperCAmelCase__).to_tuple()
self.assertEqual(
len(UpperCAmelCase__) , len(UpperCAmelCase__) , '''Output lengths differ between Flax and PyTorch''')
for fx_output, pt_output in zip(UpperCAmelCase__ , UpperCAmelCase__):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2)
@tooslow
    def test_model_from_pretrained( self : Tuple) ->Optional[Any]:
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 231 | 0 |
import re
import string
import numpy as np
import datasets
__A : str = '''\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'''
__A : Optional[Any] = '''\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'''
__A : Union[str, Any] = '''\n'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
    def _info( self : List[str] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _compute( self : Any , predictions : Union[str, Any] , references : Dict , regexes_to_ignore : List[Any]=None , ignore_case : List[Any]=False , ignore_punctuation : Any=False , ignore_numbers : List[str]=False , ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , '' , x ) for x in predictions] )
                references = np.array([re.sub(s , '' , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('' , '' , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans('' , '' , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 138 |
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder ( ModelMixin , ConfigMixin , ModuleUtilsMixin ):
    _keys_to_ignore_on_load_unexpected = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias''']
@register_to_config
    def __init__( self , prefix_length , prefix_inner_dim , prefix_hidden_dim = None , vocab_size = 5_0257 , n_positions = 1024 , n_embd = 768 , n_layer = 12 , n_head = 12 , n_inner = None , activation_function = "gelu_new" , resid_pdrop = 0.1 , embd_pdrop = 0.1 , attn_pdrop = 0.1 , layer_norm_epsilon = 1e-5 , initializer_range = 0.02 , scale_attn_weights = True , use_cache = True , scale_attn_by_inverse_layer_idx = False , reorder_and_upcast_attn = False , ):
super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and'''
                f''' `n_embd`: {n_embd} are not equal.''')
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim , n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size , n_positions=n_positions , n_embd=n_embd , n_layer=n_layer , n_head=n_head , n_inner=n_inner , activation_function=activation_function , resid_pdrop=resid_pdrop , embd_pdrop=embd_pdrop , attn_pdrop=attn_pdrop , layer_norm_epsilon=layer_norm_epsilon , initializer_range=initializer_range , scale_attn_weights=scale_attn_weights , use_cache=use_cache , scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx , reorder_and_upcast_attn=reorder_and_upcast_attn , )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward( self , input_ids , prefix_embeds , attention_mask = None , labels = None , ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text) , dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0] , input_ids.device)
            labels = torch.cat((dummy_token, input_ids) , dim=1)
        out = self.transformer(inputs_embeds=embedding_cat , labels=labels , attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token( self , batch_size , device):
        return torch.zeros(batch_size , self.prefix_length , dtype=torch.int64 , device=device)
    def encode( self , prefix):
        return self.encode_prefix(prefix)
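    # intended data flow (dimension names are assumptions based on the constructor):
    # clip feature (batch, prefix_inner_dim) -> encode_prefix -> (batch, prefix_hidden_dim)
    # -> decode_prefix -> (batch, n_embd), then concatenated in front of the token embeddings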
@torch.no_grad()
    def generate_captions( self , features , eos_token_id , device):
        features = torch.split(features , 1 , dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens , seq_lengths = self.generate_beam(
                input_embeds=feature , device=device , eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam( self , input_embeds=None , device=None , input_ids=None , beam_size = 5 , entry_length = 67 , temperature = 1.0 , eos_token_id = None , ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size , device=device , dtype=torch.int)
        is_stopped = torch.zeros(beam_size , device=device , dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores , next_tokens = logits.topk(beam_size , -1)
                generated = generated.expand(beam_size , *generated.shape[1:])
                next_tokens , scores = next_tokens.permute(1 , 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size , *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens) , dim=1)
            else:
                # stopped beams must not accumulate further probability mass
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average , next_tokens = scores_sum_average.view(-1).topk(beam_size , -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens) , dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0] , 1 , -1)
            generated = torch.cat((generated, next_token_embed) , dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts , dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype)
        return output_texts, seq_lengths | 163 | 0
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class a__ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
lowerCAmelCase__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ = text_generator('''This is a test''' , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
lowerCAmelCase__ = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
lowerCamelCase_ , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
lowerCAmelCase__ = text_generator('''This is a test''' , do_sample=lowerCamelCase_ , num_return_sequences=2 , return_tensors=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
] , )
lowerCAmelCase__ = text_generator.model.config.eos_token_id
lowerCAmelCase__ = '''<pad>'''
lowerCAmelCase__ = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=lowerCamelCase_ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCamelCase_ , )
self.assertEqual(
lowerCamelCase_ , [
[
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
],
[
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
{'''generated_token_ids''': ANY(lowerCamelCase_ )},
],
] , )
@require_tf
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
# Using `do_sample=False` to force deterministic output
lowerCAmelCase__ = text_generator('''This is a test''' , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
lowerCAmelCase__ = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
    def get_test_pipeline( self , model , tokenizer , processor ) -> str:
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria( self ) -> Optional[int]:
        prompt = '''Hello I believe in'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        output = text_generator(prompt )
        self.assertEqual(
            output , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
        output = text_generator(prompt , stop_sequence=''' fe''' )
        self.assertEqual(output , [{'''generated_text''': '''Hello I believe in fe'''}] )
    def run_pipeline_test( self , text_generator , _ ) -> Union[str, Any]:
        model = text_generator.model
        tokenizer = text_generator.tokenizer
lowerCAmelCase__ = text_generator('''This is a test''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ = text_generator('''This is a test''' , return_full_text=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ = pipeline(task='''text-generation''' , model=lowerCamelCase_ , tokenizer=lowerCamelCase_ , return_full_text=lowerCamelCase_ )
lowerCAmelCase__ = text_generator('''This is a test''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
lowerCAmelCase__ = text_generator('''This is a test''' , return_full_text=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
lowerCAmelCase__ = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
] , )
if text_generator.tokenizer.pad_token is not None:
lowerCAmelCase__ = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCamelCase_ )
self.assertEqual(
lowerCamelCase_ , [
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
[{'''generated_text''': ANY(lowerCamelCase_ )}, {'''generated_text''': ANY(lowerCamelCase_ )}],
] , )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = text_generator('''test''' , return_full_text=lowerCamelCase_ , return_text=lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = text_generator('''test''' , return_full_text=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
with self.assertRaises(lowerCamelCase_ ):
lowerCAmelCase__ = text_generator('''test''' , return_text=lowerCamelCase_ , return_tensors=lowerCamelCase_ )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
lowerCAmelCase__ = text_generator('''''' )
self.assertEqual(lowerCamelCase_ , [{'''generated_text''': ANY(lowerCamelCase_ )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
lowerCAmelCase__ = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
lowerCAmelCase__ = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_00_00
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 5_00 , max_new_tokens=20 )
lowerCAmelCase__ = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(lowerCamelCase_ ):
text_generator(
'''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self ) -> str:
import torch
# Classic `model_kwargs`
lowerCAmelCase__ = pipeline(
model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ = pipe('''This is a test''' )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
lowerCAmelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
lowerCAmelCase__ = pipe('''This is a test''' )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
lowerCAmelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
lowerCAmelCase__ = pipe('''This is a test''' )
self.assertEqual(
lowerCamelCase_ , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
import torch
lowerCAmelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa )
pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def __SCREAMING_SNAKE_CASE ( self ) -> str:
import torch
lowerCAmelCase__ = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa )
pipe('''This is a test''' , do_sample=lowerCamelCase_ , top_p=0.5 )
    def test_pipeline_length_setting_warning( self ) -> int:
        prompt = '''Hello world'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            logger = logging.get_logger('''transformers.generation.utils''' )
        logger_msg = '''Both `max_new_tokens`'''  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )
        with CaptureLogger(logger ) as cl:
            _ = text_generator(prompt , max_length=10 )
        self.assertNotIn(logger_msg , cl.out ) | 228 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def normalize_box( box , width , height ) -> Optional[Any]:
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
def apply_tesseract( image , lang , tesseract_config ) -> Union[str, Any]:
    pil_image = to_pil_image(image )
    image_width , image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words , left , top , width , height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box )
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class LayoutLMv3ImageProcessor ( BaseImageProcessor ):
    '''simple docstring'''
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_value = 1 / 2_55 , do_normalize = True , image_mean = None , image_std = None , apply_ocr = True , ocr_lang = None , tesseract_config = "" , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
        size = get_size_dict(size )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
        output_size = (size['''height'''], size['''width'''])
        # `resize` here resolves to the module-level image transform, not this method
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images , do_resize = None , size = None , resample=None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , apply_ocr = None , ocr_lang = None , tesseract_config = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            words_batch = []
            boxes_batch = []
            for image in images:
                words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
                words_batch.append(words )
                boxes_batch.append(boxes )
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = BatchFeature(data={'''pixel_values''': images} , tensor_type=return_tensors )
        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data | 228 | 1
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self: Optional[int] ) -> None:
super().setUp()
__UpperCAmelCase : Union[str, Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer( self: Optional[Any] , **kwargs: Any ) -> Any:
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self: Dict , tokenizer: Dict ) -> Optional[Any]:
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self: List[str] ) -> Dict:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
    def test_special_tokens_as_you_expect( self: List[Any] ) -> Dict:
        pass
| 157 | import argparse
import os
import re
PATH_TO_AUTO_MODULE = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'''\s*\(\s*"(\S[^"]+)"''')
def sort_auto_mapping( fname, overwrite = False ) -> List[Any]:
    with open(fname, "r", encoding="utf-8" ) as f:
        content = f.read()
    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings( overwrite = False ) -> Any:
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames, diffs ) if d]
        raise ValueError(
            f'''The following files have auto mappings that need sorting: {', '.join(failures )}. Run `make style` to fix'''
            " this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
    args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
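    # e.g. `python utils/sort_auto_mappings.py --check_only` only reports files whose auto
    # mappings are out of order, while omitting the flag rewrites them in place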
| 157 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a__ ( unittest.TestCase ):
    def test_flatten_dict( self ):
"""simple docstring"""
__lowerCAmelCase = {
"task_specific_params": {
"summarization": {"length_penalty": 1.0, "max_length": 1_2_8, "min_length": 1_2, "num_beams": 4},
"summarization_cnn": {"length_penalty": 2.0, "max_length": 1_4_2, "min_length": 5_6, "num_beams": 4},
"summarization_xsum": {"length_penalty": 1.0, "max_length": 6_2, "min_length": 1_1, "num_beams": 6},
}
}
__lowerCAmelCase = {
"task_specific_params.summarization.length_penalty": 1.0,
"task_specific_params.summarization.max_length": 1_2_8,
"task_specific_params.summarization.min_length": 1_2,
"task_specific_params.summarization.num_beams": 4,
"task_specific_params.summarization_cnn.length_penalty": 2.0,
"task_specific_params.summarization_cnn.max_length": 1_4_2,
"task_specific_params.summarization_cnn.min_length": 5_6,
"task_specific_params.summarization_cnn.num_beams": 4,
"task_specific_params.summarization_xsum.length_penalty": 1.0,
"task_specific_params.summarization_xsum.max_length": 6_2,
"task_specific_params.summarization_xsum.min_length": 1_1,
"task_specific_params.summarization_xsum.num_beams": 6,
}
self.assertEqual(flatten_dict(_A ) , _A )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(_A ) , x.transpose() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A ) , transpose(_A ).numpy() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = torch.tensor(_A )
self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , transpose(_A , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A ) , transpose(_A ).numpy() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = tf.constant(_A )
self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , transpose(_A , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A ) , np.asarray(transpose(_A ) ) ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = jnp.array(_A )
self.assertTrue(np.allclose(transpose(_A , axes=(1, 2, 0) ) , np.asarray(transpose(_A , axes=(1, 2, 0) ) ) ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , np.reshape(_A , (4, 3) ) ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(_A , (1_2, 5) ) , np.reshape(_A , (1_2, 5) ) ) )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , reshape(_A , (4, 3) ).numpy() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = torch.tensor(_A )
self.assertTrue(np.allclose(reshape(_A , (1_2, 5) ) , reshape(_A , (1_2, 5) ).numpy() ) )
@require_tf
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , reshape(_A , (4, 3) ).numpy() ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = tf.constant(_A )
self.assertTrue(np.allclose(reshape(_A , (1_2, 5) ) , reshape(_A , (1_2, 5) ).numpy() ) )
@require_flax
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A , (4, 3) ) , np.asarray(reshape(_A , (4, 3) ) ) ) )
__lowerCAmelCase = np.random.randn(3 , 4 , 5 )
__lowerCAmelCase = jnp.array(_A )
self.assertTrue(np.allclose(reshape(_A , (1_2, 5) ) , np.asarray(reshape(_A , (1_2, 5) ) ) ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(_A ) , np.squeeze(_A ) ) )
__lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , np.squeeze(_A , axis=2 ) ) )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(1 , 3 , 4 )
__lowerCAmelCase = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A ) , squeeze(_A ).numpy() ) )
__lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCAmelCase = torch.tensor(_A )
self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , squeeze(_A , axis=2 ).numpy() ) )
@require_tf
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(1 , 3 , 4 )
__lowerCAmelCase = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A ) , squeeze(_A ).numpy() ) )
__lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCAmelCase = tf.constant(_A )
self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , squeeze(_A , axis=2 ).numpy() ) )
@require_flax
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(1 , 3 , 4 )
__lowerCAmelCase = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A ) , np.asarray(squeeze(_A ) ) ) )
__lowerCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
__lowerCAmelCase = jnp.array(_A )
self.assertTrue(np.allclose(squeeze(_A , axis=2 ) , np.asarray(squeeze(_A , axis=2 ) ) ) )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , np.expand_dims(_A , axis=1 ) ) )
@require_torch
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = torch.tensor(_A )
self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , expand_dims(_A , axis=1 ).numpy() ) )
@require_tf
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = tf.constant(_A )
self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , expand_dims(_A , axis=1 ).numpy() ) )
@require_flax
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = np.random.randn(3 , 4 )
__lowerCAmelCase = jnp.array(_A )
self.assertTrue(np.allclose(expand_dims(_A , axis=1 ) , np.asarray(expand_dims(_A , axis=1 ) ) ) )
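# All of the tests above exercise single framework-agnostic ops; a minimal
# sketch of how such an op can dispatch on its input type (an illustration,
# not the actual transformers.utils implementation):
def _dispatching_transpose(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    if is_torch_available() and isinstance(array, torch.Tensor):
        return array.T if axes is None else array.permute(*axes)
    if is_tf_available() and isinstance(array, tf.Tensor):
        return tf.transpose(array, perm=axes)
    if is_flax_available() and isinstance(array, jnp.ndarray):
        return jnp.transpose(array, axes=axes)
    raise ValueError(f"Type not supported for transpose: {type(array)}.")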
| 354 |
def merge_sort(collection ):
    # Repeatedly peel the current min and max off the list: the mins collect in
    # ascending order at the front, the maxes (reversed at the end) at the back.
    start , end = [], []
    while len(collection ) > 1:
        mini , maxi = min(collection ), max(collection )
        start.append(mini )
        end.append(maxi )
        collection.remove(mini )
        collection.remove(maxi )
    end.reverse()
    return start + collection + end
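# Quick sanity check of the peel-off above (note that the repeated
# min/max/remove passes make this O(n^2) overall):
assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]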
if __name__ == "__main__":
UpperCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 102 | 0 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class EncodecConfig(PretrainedConfig ):
    model_type = "encodec"
def __init__( self : Dict , _UpperCAmelCase : List[Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , _UpperCAmelCase : Any=2_40_00 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Tuple=1_28 , _UpperCAmelCase : List[str]=32 , _UpperCAmelCase : List[Any]=1 , _UpperCAmelCase : List[Any]=[8, 5, 4, 2] , _UpperCAmelCase : Union[str, Any]="weight_norm" , _UpperCAmelCase : Any=7 , _UpperCAmelCase : Optional[Any]=7 , _UpperCAmelCase : str=3 , _UpperCAmelCase : str=2 , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : List[Any]="reflect" , _UpperCAmelCase : Any=2 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=1.0 , _UpperCAmelCase : Optional[Any]=10_24 , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Optional[int]=True , **_UpperCAmelCase : str , ) -> str:
"""simple docstring"""
__lowercase = target_bandwidths
__lowercase = sampling_rate
__lowercase = audio_channels
__lowercase = normalize
__lowercase = chunk_length_s
__lowercase = overlap
__lowercase = hidden_size
__lowercase = num_filters
__lowercase = num_residual_layers
__lowercase = upsampling_ratios
__lowercase = norm_type
__lowercase = kernel_size
__lowercase = last_kernel_size
__lowercase = residual_kernel_size
__lowercase = dilation_growth_rate
__lowercase = use_causal_conv
__lowercase = pad_mode
__lowercase = compress
__lowercase = num_lstm_layers
__lowercase = trim_right_ratio
__lowercase = codebook_size
__lowercase = codebook_dim if codebook_dim is not None else hidden_size
__lowercase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**_UpperCAmelCase )
    @property
    def chunk_length( self ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ) -> Optional[int]:
        """simple docstring"""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate( self ) -> int:
        """simple docstring"""
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers( self ) -> int:
        """simple docstring"""
        return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
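# A short illustration of how the derived properties above interact (the
# numbers below are only illustrative, not a particular checkpoint's values):
# config = EncodecConfig(sampling_rate=24_000, chunk_length_s=1.0, overlap=0.5)
# config.chunk_length   # 24_000 samples = chunk_length_s * sampling_rate
# config.chunk_stride   # 12_000 samples = (1 - overlap) * chunk_length
# config.frame_rate     # ceil(24_000 / prod([8, 5, 4, 2])) == 75 frames per second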
| 325 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig ):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowercase = CONFIG_MAPPING['swin'](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = backbone_config.pop('model_type' )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(_UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
__lowercase = backbone_config
__lowercase = feature_size
__lowercase = mask_feature_size
__lowercase = hidden_dim
__lowercase = encoder_feedforward_dim
__lowercase = activation_function
__lowercase = encoder_layers
__lowercase = decoder_layers
__lowercase = num_attention_heads
__lowercase = dropout
__lowercase = dim_feedforward
__lowercase = pre_norm
__lowercase = enforce_input_projection
__lowercase = common_stride
__lowercase = ignore_value
__lowercase = num_queries
__lowercase = no_object_weight
__lowercase = class_weight
__lowercase = mask_weight
__lowercase = dice_weight
__lowercase = train_num_points
__lowercase = oversample_ratio
__lowercase = importance_sample_ratio
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = use_auxiliary_loss
__lowercase = feature_strides
__lowercase = output_auxiliary_logits
        __lowercase = decoder_layers  # upstream this second assignment backs `num_hidden_layers`
super().__init__(**_UpperCAmelCase )
    @classmethod
    def from_backbone_config( cls , backbone_config : PretrainedConfig , **kwargs ):
        """simple docstring"""
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
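# A minimal sketch of the two helpers above (values are only illustrative):
# backbone = CONFIG_MAPPING["swin"](image_size=224)
# cfg = Mask2FormerConfig.from_backbone_config(backbone)
# d = cfg.to_dict()   # plain dict; d["backbone_config"] is itself a nested dict
# d["model_type"]     # "mask2former"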
| 325 | 1 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , *args ):
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , *args ):
        model = BioGptForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_biogpt_model_attention_mask_past( self , config , input_ids , token_type_ids , input_mask , *args ):
        model = BioGptModel(config=config )
        model.to(torch_device )
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output , past = model(input_ids , attention_mask=attn_mask ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,) , half_seq_length ).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device )] , dim=1 , )
        # get two different outputs
        output_from_no_past = model(next_input_ids , attention_mask=attn_mask )["""last_hidden_state"""]
        output_from_past = model(next_tokens , past_key_values=past , attention_mask=attn_mask )["""last_hidden_state"""]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_biogpt_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , *args ):
        model = BioGptModel(config=config ).to(torch_device ).eval()
        attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device )
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output , past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )["""last_hidden_state"""]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            """last_hidden_state"""
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def create_and_check_forward_and_backwards( self , config , input_ids , token_type_ids , input_mask , *args , gradient_checkpointing=False ):
        model = BioGptForCausalLM(config )
        model.to(torch_device )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization( self , config , *args ):
        model = BioGptModel(config )
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
    def create_and_check_biogpt_for_token_classification( self , config , input_ids , token_type_ids , input_mask , *args ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp( self ):
        self.model_tester = BioGptModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_biogpt_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_biogpt_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_biogpt_model_attention_mask_past( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs )
    def test_biogpt_gradient_checkpointing( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True )
    def test_biogpt_model_past_with_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs )
    def test_biogpt_weight_initialization( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs )
    def test_biogpt_token_classification_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs )
@slow
def lowerCamelCase ( self : int ):
snake_case__ : List[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case_ )
snake_case__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
snake_case__ : Dict = """left"""
# Define PAD Token = EOS Token = 50256
snake_case__ : Dict = tokenizer.eos_token
snake_case__ : Optional[int] = model.config.eos_token_id
# use different length sentences to test batching
snake_case__ : Tuple = [
"""Hello, my dog is a little""",
"""Today, I""",
]
snake_case__ : List[Any] = tokenizer(snake_case_ , return_tensors="""pt""" , padding=snake_case_ )
snake_case__ : Tuple = inputs["""input_ids"""].to(snake_case_ )
snake_case__ : Dict = model.generate(
input_ids=snake_case_ , attention_mask=inputs["""attention_mask"""].to(snake_case_ ) , )
snake_case__ : str = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case_ )
snake_case__ : List[Any] = model.generate(input_ids=snake_case_ )
snake_case__ : int = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
snake_case__ : Tuple = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case_ )
snake_case__ : Dict = model.generate(input_ids=snake_case_ , max_length=model.config.max_length - num_paddings )
snake_case__ : Any = tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
snake_case__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case_ )
snake_case__ : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case_ )
snake_case__ : List[Any] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , [non_padded_sentence, padded_sentence] )
@slow
    def test_model_from_pretrained( self ):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_biogpt_sequence_classification_model( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_biogpt_sequence_classification_model_for_multi_label( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_biogpt( self ):
        model = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        input_ids = torch.tensor([[2, 4_805, 9, 656, 21]] )
        output = model(input_ids )[0]
        vocab_size = 42_384
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_biogpt_generation( self ):
        tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
        model = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(torch_device )
        output_ids = model.generate(
            **tokenized , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=True , )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
            """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
            """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
            """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
            """ more than 800,000 deaths."""
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
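# The cache tests earlier in this file verify that a forward pass fed
# `past_key_values` plus only the new tokens matches a full recompute. A
# minimal greedy-decoding sketch of that pattern (illustrative only, not part
# of the test suite):
# past = None
# ids = tokenizer("COVID-19 is", return_tensors="pt").input_ids
# for _ in range(20):
#     out = model(ids if past is None else ids[:, -1:], past_key_values=past, use_cache=True)
#     past = out.past_key_values
#     next_id = out.logits[:, -1].argmax(-1, keepdim=True)
#     ids = torch.cat([ids, next_id], dim=-1)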
| 43 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback ):
    """simple docstring"""
    def __init__( self ):
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ):
        self.events.append("""on_init_end""" )
    def on_train_begin( self , args , state , control , **kwargs ):
        self.events.append("""on_train_begin""" )
    def on_train_end( self , args , state , control , **kwargs ):
        self.events.append("""on_train_end""" )
    def on_epoch_begin( self , args , state , control , **kwargs ):
        self.events.append("""on_epoch_begin""" )
    def on_epoch_end( self , args , state , control , **kwargs ):
        self.events.append("""on_epoch_end""" )
    def on_step_begin( self , args , state , control , **kwargs ):
        self.events.append("""on_step_begin""" )
    def on_step_end( self , args , state , control , **kwargs ):
        self.events.append("""on_step_end""" )
    def on_evaluate( self , args , state , control , **kwargs ):
        self.events.append("""on_evaluate""" )
    def on_predict( self , args , state , control , **kwargs ):
        self.events.append("""on_predict""" )
    def on_save( self , args , state , control , **kwargs ):
        self.events.append("""on_save""" )
    def on_log( self , args , state , control , **kwargs ):
        self.events.append("""on_log""" )
    def on_prediction_step( self , args , state , control , **kwargs ):
        self.events.append("""on_prediction_step""" )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()
    def tearDown( self ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ):
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cba, cbb in zip(cbs1 , cbs2 ):
            if isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba , cbb )
            elif isinstance(cba , type ) and not isinstance(cbb , type ):
                self.assertEqual(cba , cbb.__class__ )
            elif not isinstance(cba , type ) and isinstance(cbb , type ):
                self.assertEqual(cba.__class__ , cbb )
            else:
                self.assertEqual(cba , cbb )
    def get_expected_events( self , trainer ):
        expected_events = ["""on_init_end""", """on_train_begin"""]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append("""on_epoch_begin""" )
            for _ in range(train_dl_len ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("""on_log""" )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("""on_save""" )
            expected_events.append("""on_epoch_end""" )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
    def test_init_callback( self ):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_add_remove_callback( self ):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(cb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cba = trainer.callback_handler.callbacks[0]
        cbb = trainer.pop_callback(cba )
        self.assertEqual(cba , cbb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cba )
        expected_callbacks.insert(0 , cba )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_event_flow( self ):
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="""ignore""" , category=UserWarning )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # warning should be emitted for duplicated callbacks
        with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
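# A minimal sketch of using such a callback outside the test suite (the
# `PrintLogsCallback` name is made up for the demo):
# class PrintLogsCallback(TrainerCallback):
#     def on_log(self, args, state, control, logs=None, **kwargs):
#         print(f"step {state.global_step}: {logs}")
# trainer = Trainer(model, training_args, callbacks=[PrintLogsCallback])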
| 43 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job ) -> dict:
    job_info = {}
    start = job["""started_at"""]
    end = job["""completed_at"""]
    start_datetime = date_parser.parse(start )
    end_datetime = date_parser.parse(end )
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0 )
    job_info["""started_at"""] = start
    job_info["""completed_at"""] = end
    job_info["""duration"""] = duration_in_min
    return job_info
def get_job_time(workflow_run_id ,token=None ) -> dict:
    headers = None
    if token is not None:
        headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url ,headers=headers ).json()
    job_time = {}
    try:
        job_time.update({job["""name"""]: extract_time_from_single_job(job ) for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f"""&page={i + 2}""" ,headers=headers ).json()
            job_time.update({job["""name"""]: extract_time_from_single_job(job ) for job in result["""jobs"""]} )
        return job_time
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f"""{k}: {v["duration"]}""")
| 124 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key ):
    def decorator(func ):
        handle = getattr(func ,"""handle_key""" ,[] )
        handle += [key]
        setattr(func ,"""handle_key""" ,handle )
        return func
    return decorator
def mark_multiple(*keys ):
    def decorator(func ):
        handle = getattr(func ,"""handle_key""" ,[] )
        handle += keys
        setattr(func ,"""handle_key""" ,handle )
        return func
    return decorator
class KeyHandler(type ):
    """simple docstring"""
    def __new__( cls , name , bases , attrs ):
        new_cls = super().__new__(cls , name , bases , attrs )
        if not hasattr(new_cls , """key_handler""" ):
            setattr(new_cls , """key_handler""" , {} )
        setattr(new_cls , """handle_input""" , KeyHandler.handle_input )
        for value in attrs.values():
            handled_keys = getattr(value , """handle_key""" , [] )
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input( cls ):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char )
        handler = cls.key_handler.get(char )
        if handler:
            cls.current_selection = char
            return handler(cls )
        else:
            return None
def register(cls ):
    return KeyHandler(cls.__name__ ,cls.__bases__ ,cls.__dict__.copy() )
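# A minimal usage sketch of the machinery above: methods decorated with `mark`
# are collected into `key_handler` by the metaclass, and `handle_input` routes
# the pressed key (as an ord value) to the matching method. The `Menu` class
# below is made up for the demo:
# class Menu(metaclass=KeyHandler):
#     @mark(ord("q"))
#     def quit(cls):
#         return "bye"
# Menu.handle_input()   # returns "bye" when the user presses "q"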
| 124 | 1 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Tuple = logging.get_logger(__name__)
snake_case__ : int = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class EncodecConfig(PretrainedConfig ):
    model_type = '''encodec'''
def __init__( self : Dict , UpperCamelCase_ : List[str]=[1.5, 3.0, 6.0, 12.0, 24.0] , UpperCamelCase_ : int=2_4_0_0_0 , UpperCamelCase_ : Optional[Any]=1 , UpperCamelCase_ : Dict=False , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=1_2_8 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : str=1 , UpperCamelCase_ : Optional[int]=[8, 5, 4, 2] , UpperCamelCase_ : Optional[Any]="weight_norm" , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : Dict=7 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]="reflect" , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=1.0 , UpperCamelCase_ : Any=1_0_2_4 , UpperCamelCase_ : int=None , UpperCamelCase_ : int=True , **UpperCamelCase_ : Dict , ):
lowerCAmelCase : Optional[int] = target_bandwidths
lowerCAmelCase : Tuple = sampling_rate
lowerCAmelCase : Optional[Any] = audio_channels
lowerCAmelCase : Dict = normalize
lowerCAmelCase : int = chunk_length_s
lowerCAmelCase : Union[str, Any] = overlap
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Optional[Any] = num_filters
lowerCAmelCase : Dict = num_residual_layers
lowerCAmelCase : List[str] = upsampling_ratios
lowerCAmelCase : List[Any] = norm_type
lowerCAmelCase : Tuple = kernel_size
lowerCAmelCase : str = last_kernel_size
lowerCAmelCase : List[str] = residual_kernel_size
lowerCAmelCase : int = dilation_growth_rate
lowerCAmelCase : Tuple = use_causal_conv
lowerCAmelCase : Optional[int] = pad_mode
lowerCAmelCase : Any = compress
lowerCAmelCase : Optional[int] = num_lstm_layers
lowerCAmelCase : List[Any] = trim_right_ratio
lowerCAmelCase : Dict = codebook_size
lowerCAmelCase : Optional[Any] = codebook_dim if codebook_dim is not None else hidden_size
lowerCAmelCase : Optional[Any] = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**UpperCamelCase_ )
    @property
    def chunk_length( self ):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate )
    @property
    def chunk_stride( self ):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
    @property
    def frame_rate( self ):
        hop_length = np.prod(self.upsampling_ratios )
        return math.ceil(self.sampling_rate / hop_length )
    @property
    def num_quantizers( self ):
        return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
| 314 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest ):
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_variance_type( self ):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=t )
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1E-5
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 258.9_606 ) < 1E-2
        assert abs(result_mean.item() - 0.3_372 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        num_trained_timesteps = len(scheduler )
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for t in reversed(range(num_trained_timesteps ) ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 202.0_296 ) < 1E-2
        assert abs(result_mean.item() - 0.2_631 ) < 1E-3
    def test_custom_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps )
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps ):
            if i == len(scheduler_timesteps ) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = scheduler_timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep )
            prev_t = prev_t.item()
            self.assertEqual(prev_t , expected_prev_t )
    def test_custom_timesteps_increasing_order( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
            scheduler.set_timesteps(timesteps=timesteps )
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [1_0_0, 8_7, 5_0, 1, 0]
        num_inference_steps = len(timesteps )
        with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
    def test_custom_timesteps_too_large( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
            scheduler.set_timesteps(timesteps=timesteps )
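# A minimal sketch of driving the scheduler outside the test harness (random
# tensors stand in for a real denoising model's output; shapes are illustrative):
# scheduler = DDPMScheduler(num_train_timesteps=1_000)
# sample = torch.randn(1, 3, 32, 32)
# for t in reversed(range(1_000)):
#     residual = torch.randn(1, 3, 32, 32)   # would come from a UNet in practice
#     sample = scheduler.step(residual, t, sample).prev_sample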
| 314 | 1 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
UpperCamelCase__ = """src/transformers"""
UpperCamelCase__ = """docs/source/en"""
UpperCamelCase__ = """."""
def _find_text_in_file( filename , start_prompt , end_prompt ):
    with open(filename , "r" , encoding="utf-8" , newline="\n" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
UpperCamelCase__ = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
UpperCamelCase__ = re.compile(R"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
UpperCamelCase__ = re.compile(R"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
UpperCamelCase__ = re.compile(R"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split( identifier ):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , identifier )
    return [m.group(0 ) for m in matches]
def _center_text( text , width ):
    text_length = 2 if text == "✅" or text == "❌" else len(text )
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules( ):
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config" , "" ) for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool )
    fast_tokenizers = collections.defaultdict(bool )
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if attr_name.endswith("Tokenizer" ):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast" ):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name )[:-1] )
    # Let's build that table!
    model_names = list(model_name_to_config.keys() )
    model_names.sort(key=str.lower )
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c ) + 2 for c in columns]
    widths[0] = max([len(name ) for name in model_names] ) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c , w ) for c, w in zip(columns , widths )] ) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l , w ) for l, w in zip(line , widths )] ) + "|\n"
    return table
def check_model_table( overwrite=False ):
    current_table , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS , "index.md" ) , start_prompt="<!--This table is updated automatically from the auto modules" , end_prompt="<!-- End table-->" , )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS , "index.md" ) , "w" , encoding="utf-8" , newline="\n" ) as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 92 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129 | 0 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
    Bzip2Extractor,
    Extractor,
    GzipExtractor,
    Lz4Extractor,
    SevenZipExtractor,
    TarExtractor,
    XzExtractor,
    ZipExtractor,
    ZstdExtractor,
)
from .utils import require_lz4, require_py7zr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_base_extractors( compression_format , is_archive , bz2_file , gz_file , lz4_file , seven_zip_file , tar_file , xz_file , zip_file , zstd_file , tmp_path , text_file , ):
    '''simple docstring'''
    input_paths_and_base_extractors = {
        """7z""": (seven_zip_file, SevenZipExtractor),
        """bz2""": (bz2_file, Bzip2Extractor),
        """gzip""": (gz_file, GzipExtractor),
        """lz4""": (lz4_file, Lz4Extractor),
        """tar""": (tar_file, TarExtractor),
        """xz""": (xz_file, XzExtractor),
        """zip""": (zip_file, ZipExtractor),
        """zstd""": (zstd_file, ZstdExtractor),
    }
    input_path , base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        reason = f"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    assert base_extractor.is_extractable(input_path )
    output_path = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
    base_extractor.extract(input_path , output_path )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="""utf-8""" )
    else:
        extracted_file_content = output_path.read_text(encoding="""utf-8""" )
    expected_file_content = text_file.read_text(encoding="""utf-8""" )
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def test_extractor( compression_format , is_archive , bz2_file , gz_file , lz4_file , seven_zip_file , tar_file , xz_file , zip_file , zstd_file , tmp_path , text_file , ):
    '''simple docstring'''
    input_paths = {
        """7z""": seven_zip_file,
        """bz2""": bz2_file,
        """gzip""": gz_file,
        """lz4""": lz4_file,
        """tar""": tar_file,
        """xz""": xz_file,
        """zip""": zip_file,
        """zstd""": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        reason = f"""for '{compression_format}' compression_format, """
        if compression_format == "7z":
            reason += require_py7zr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    extractor_format = Extractor.infer_extractor_format(input_path )
    assert extractor_format is not None
    output_path = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
    Extractor.extract(input_path , output_path , extractor_format )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="""utf-8""" )
    else:
        extracted_file_content = output_path.read_text(encoding="""utf-8""" )
    expected_file_content = text_file.read_text(encoding="""utf-8""" )
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot( tmp_path , text_file ):
    '''simple docstring'''
    import tarfile
    directory = tmp_path / """data_dot_dot"""
    directory.mkdir()
    path = directory / """tar_file_with_dot_dot.tar"""
    with tarfile.TarFile(path , """w""" ) as f:
        f.add(text_file , arcname=os.path.join("""..""" , text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link( tmp_path ):
    '''simple docstring'''
    import tarfile
    directory = tmp_path / """data_sym_link"""
    directory.mkdir()
    path = directory / """tar_file_with_sym_link.tar"""
    os.symlink("""..""" , directory / """subdir""" , target_is_directory=True )
    with tarfile.TarFile(path , """w""" ) as f:
        f.add(str(directory / """subdir""" ) , arcname="""subdir""" )  # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def test_tar_extract_insecure_files( insecure_tar_file , error_log , tar_file_with_dot_dot , tar_file_with_sym_link , tmp_path , caplog ):
    '''simple docstring'''
    insecure_tar_files = {
        """tar_file_with_dot_dot""": tar_file_with_dot_dot,
        """tar_file_with_sym_link""": tar_file_with_sym_link,
    }
    input_path = insecure_tar_files[insecure_tar_file]
    output_path = tmp_path / """extracted"""
    TarExtractor.extract(input_path , output_path )
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive( tmpdir ):
    '''simple docstring'''
    not_a_zip_file = tmpdir / """not_a_zip_file"""
    # From: https://github.com/python/cpython/pull/5053
    data = (
        b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
        b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
        b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
        b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
    )
    with not_a_zip_file.open("""wb""" ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) )  # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file )  # but we're right
| 367 | # Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_SCREAMING_SNAKE_CASE = TypeVar("""T""")
class GraphAdjacencyList( Generic[T] ):
    def __init__( self , directed : bool = True ):
        """simple docstring"""
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge( self , source_vertex : T , destination_vertex : T ):
"""simple docstring"""
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
def __repr__( self : Optional[int] ):
"""simple docstring"""
return pformat(self.adj_list )
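# Illustrative usage (added for clarity): building a small undirected graph.
#   g = GraphAdjacencyList(directed=False)
#   g.add_edge(1, 2).add_edge(2, 3)     # add_edge returns self, so calls chain
#   g.adj_list == {1: [2], 2: [1, 3], 3: [2]}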
| 165 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['''data'''])
y = np.array(data['''target'''])
classes = data['''target_names''']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance( point_a , point_b ):
    '''simple docstring'''
    return np.linalg.norm(np.array(point_a ) - np.array(point_b ) )
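# Illustrative example (added for clarity): euclidean_distance([0, 0], [3, 4]) == 5.0.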
def classifier( train_data , train_target , classes , point , k=5 ):
    '''simple docstring'''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 329 | '''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp( self):
        """simple docstring"""
        super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file , """w""" , encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
    def get_tokenizer( self , **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_rust_tokenizer( self , **kwargs):
        """simple docstring"""
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs)
    def get_input_output_texts( self , tokenizer):
        """simple docstring"""
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer( self):
        """simple docstring"""
        return MvpTokenizer.from_pretrained("""RUCAIBox/mvp""")
    @cached_property
    def default_tokenizer_fast( self):
        """simple docstring"""
        return MvpTokenizerFast.from_pretrained("""RUCAIBox/mvp""")
@require_torch
    def test_prepare_batch( self):
        """simple docstring"""
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , max_length=len(expected_src_tokens) , padding=True , return_tensors="""pt""")
            self.assertIsInstance(batch , BatchEncoding)
            self.assertEqual((2, 9) , batch.input_ids.shape)
            self.assertEqual((2, 9) , batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens , result)
# Test that special tokens are reset
@require_torch
    def test_prepare_batch_empty_target_text( self):
        """simple docstring"""
        src_text = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text , padding=True , return_tensors="""pt""")
            # check if input_ids are returned and no labels
            self.assertIn("""input_ids""" , batch)
            self.assertIn("""attention_mask""" , batch)
            self.assertNotIn("""labels""" , batch)
            self.assertNotIn("""decoder_attention_mask""" , batch)
@require_torch
    def test_tokenizer_as_target_length( self):
        """simple docstring"""
        tgt_text = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text , max_length=32 , padding="""max_length""" , return_tensors="""pt""")
            self.assertEqual(32 , targets["""input_ids"""].shape[1])
@require_torch
    def test_prepare_batch_not_longer_than_maxlen( self):
        """simple docstring"""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=True , truncation=True , return_tensors="""pt""")
            self.assertIsInstance(batch , BatchEncoding)
            self.assertEqual(batch.input_ids.shape , (2, 1024))
@require_torch
    def test_special_tokens( self):
        """simple docstring"""
        src_text = ["""A long paragraph for summarization."""]
        tgt_text = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text , text_target=tgt_text , return_tensors="""pt""")
            input_ids = inputs["""input_ids"""]
            labels = inputs["""labels"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs( self):
"""simple docstring"""
pass
    def test_embeded_special_tokens( self):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs)
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence , add_special_tokens=True , return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
                self.assertSequenceEqual(
                    tokens_r_str , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
| 272 | 0 |
from __future__ import annotations
def resistor_parallel( resistors: list[float] ) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
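# Illustrative check (not part of the original module): two 2.0-ohm resistors in
# parallel combine to 1.0 ohm, i.e. resistor_parallel([2.0, 2.0]) == 1.0.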
def resistor_series( resistors: list[float] ) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg )
        index += 1
    return sum_r
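# Illustrative check (not part of the original module): resistor_series([2.0, 3.5]) == 5.5.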
if __name__ == "__main__":
import doctest
doctest.testmod()
| 127 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester( unittest.TestCase ):
"""simple docstring"""
    def test_text_streamer_matches_non_streaming( self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , greedy_text)
    def test_iterator_streamer_matches_non_streaming( self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text , greedy_text)
    def test_text_streamer_skip_prompt( self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids , max_new_tokens=10 , do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_prompt=True)
            model.generate(input_ids , max_new_tokens=10 , do_sample=False , streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text , new_greedy_text)
    def test_text_streamer_decode_kwargs( self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5) , device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer , skip_special_tokens=True)
            model.generate(input_ids , max_new_tokens=1 , do_sample=False , streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text , return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
    def test_iterator_streamer_timeout( self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer , timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate , kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an Empty exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 127 | 1 |
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''projector.weight''']
    model.projector.bias.data = downstream_dict['''projector.bias''']
    model.classifier.weight.data = downstream_dict['''model.post_net.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.post_net.linear.bias''']
    return model
def convert_diarization( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict['''model.linear.weight''']
    model.classifier.bias.data = downstream_dict['''model.linear.bias''']
    return model
def convert_xvector( base_model_name , hf_config , downstream_dict ):
    """simple docstring"""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict['''connector.weight''']
    model.projector.bias.data = downstream_dict['''connector.bias''']
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
    model.feature_extractor.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
    model.classifier.weight.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
    model.classifier.bias.data = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
    model.objective.weight.data = downstream_dict['''objective.W''']
    return model
@torch.no_grad()
def convert_s3prl_checkpoint( base_model_name , config_path , checkpoint_path , model_dump_path ):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    downstream_dict = checkpoint['''Downstream''']
    hf_config = Wav2Vec2Config.from_pretrained(config_path )
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith('''ForSequenceClassification''' ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForAudioFrameClassification''' ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith('''ForXVector''' ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}" )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['''Featurizer''']['''weights''']
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
hf_model.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
_A : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
_A : List[Any] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 142 |
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
    'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    'init': 'src/diffusers/__init__.py',
    'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file( fname , version , pattern ):
    """simple docstring"""
    with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
        code = f.read()
    re_pattern , replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace('VERSION' , version )
    code = re_pattern.sub(replace , code )
    with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.write(code )
def update_version_in_examples( version ):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove('research_projects' )
        if "legacy" in directories:
            directories.remove('legacy' )
        for fname in fnames:
            if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update( version , patch=False ):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def clean_main_ref_in_model_list( ):
    """simple docstring"""
    _start_prompt = '🤗 Transformers currently provides the following architectures'
    _end_prompt = '1. Want to contribute a new model?'
    with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith('1.' ):
            lines[index] = lines[index].replace(
                'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
        index += 1
    with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
def get_version( ):
    """simple docstring"""
    with open(REPLACE_FILES['init'] , 'r' ) as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def pre_release_work( patch=False ):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]" )
    if len(version ) == 0:
        version = default_version
    print(f"Updating version to {version}." )
    global_version_update(version , patch=patch )
def post_release_work( ):
    """simple docstring"""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]" )
    if len(version ) == 0:
        version = dev_version
    print(f"Updating version to {version}." )
    global_version_update(version )
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
    parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 142 | 1 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks( protein: Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]:
    '''simple docstring'''
    restype_atom14_to_atom37 = []
    restype_atom37_to_atom14 = []
    restype_atom14_mask = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names )}
        restype_atom37_to_atom14.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types] )
        restype_atom14_mask.append([(1.0 if name else 0.0) for name in atom_names] )
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37.append([0] * 14 )
    restype_atom37_to_atom14.append([0] * 37 )
    restype_atom14_mask.append([0.0] * 14 )
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37 , dtype=torch.int32 , device=protein['aatype'].device , )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14 , dtype=torch.int32 , device=protein['aatype'].device , )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask , dtype=torch.float32 , device=protein['aatype'].device , )
    protein_aatype = protein['aatype'].to(torch.long )
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein['atom14_atom_exists'] = residx_atom14_mask
    protein['residx_atom14_to_atom37'] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein['residx_atom37_to_atom14'] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37] , dtype=torch.float32 , device=protein['aatype'].device )
    for restype, restype_letter in enumerate(rc.restypes ):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein['atom37_atom_exists'] = residx_atom37_mask
    return protein
def make_atom14_masks_np( batch: Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]:
    '''simple docstring'''
    batch = tree_map(lambda n: torch.tensor(n , device=batch['aatype'].device ) , batch , np.ndarray )
    out = tensor_tree_map(lambda t: np.array(t ) , make_atom14_masks(batch ) )
    return out
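# Note: make_atom14_masks adds its mask/index entries to the `protein` dict in place and
# returns it; make_atom14_masks_np converts the result back to numpy arrays.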
| 169 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ):
    '''simple docstring'''
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 169 | 1 |
'''simple docstring'''
def matching_min_vertex_cover( graph ):
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges( graph ):
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
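# Illustrative example (added for clarity): get_edges({0: [1], 1: [0]}) == {(0, 1), (1, 0)}.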
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}") | 163 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest( TestCase):
    def setUp( self ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = """pt"""
        self.framework_tf = """tf"""
    def _setup_pt_ckpt( self , save_dir ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(save_dir )
    def _setup_tf_ckpt( self , save_dir ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(save_dir )
    def test_framework_provided( self ):
        mock_framework = """mock_framework"""
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_provided( self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                FeaturesManager.determine_framework(local_invalid_ckpt )
    def test_from_environment( self ):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch("""transformers.onnx.features.is_tf_available""" , mock_tf_available ), patch(
            """transformers.onnx.features.is_torch_available""" , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                FeaturesManager.determine_framework(self.test_model )
| 12 | 0 |
import argparse
import torch
from torch import nn
from transformers import M2M100Config, M2M100ForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None)
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
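# Design note: make_linear_from_emb turns the shared embedding matrix into a bias-free
# linear layer, which the conversion below installs as the model's LM head (weight tying).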
def convert_fairseq_m2m100_checkpoint_from_disk( checkpoint_path ):
    m2m_100 = torch.load(checkpoint_path , map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    config = M2M100Config(
        vocab_size=vocab_size , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict , strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
    parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 333 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset")) , "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
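# A minimal sanity check for the renaming above (a sketch; the key is a
# hypothetical example of a timm-style DPT key, not taken from a real checkpoint):
#
#   rename_key("pretrained.model.blocks.0.mlp.fc1.weight")
#   # -> "dpt.encoder.layer.0.intermediate.dense.weight"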
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
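# Shape intuition for the split above (a sketch): timm stores qkv as one matrix
# of shape (3 * hidden_size, hidden_size); slicing rows [0:h], [h:2h] and [2h:3h]
# recovers the separate query, key and value projections. For example, with
# hidden_size=1024 the (3072, 1024) qkv weight yields three (1024, 1024) matrices.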
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
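# Illustrative invocation (the script filename and output directory are
# hypothetical placeholders; the URL is this script's default checkpoint):
#
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large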
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    """Return the minimum-cost top-left to bottom-right path, moving only right/down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
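# Quick worked example (illustrative): moving only right/down through
# [[1, 3, 1],
#  [1, 5, 1],
#  [4, 2, 1]]
# the cheapest top-left -> bottom-right path costs 1 + 3 + 1 + 1 + 1 = 7, so
# min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) returns 7.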
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
A_ = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
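# Minimal usage sketch (illustrative; the config values are just examples):
#
#   config = BeitConfig(image_size=224, patch_size=16)
#   onnx_config = BeitOnnxConfig(config)
#   list(onnx_config.inputs)           # -> ["pixel_values"]
#   onnx_config.atol_for_validation    # -> 1e-4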
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        # whatever remains after trial division is itself prime
        ans = n
    return int(ans)
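# Worked example (illustrative): 13195 = 5 * 7 * 13 * 29, so solution(13195)
# returns 29; with the default argument, solution() computes the largest prime
# factor of 600851475143.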
if __name__ == "__main__":
print(f'''{solution() = }''')
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1_348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(self, model_output, timestep, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(self, model_output, sample, generator=None, return_dict=True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
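# A minimal predictor-corrector sampling sketch using this scheduler
# (illustrative only; `score_model` is a hypothetical score network returning an
# object with a `.sample` attribute, not part of this file):
#
#   scheduler = ScoreSdeVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=100)
#   scheduler.set_sigmas(num_inference_steps=100)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for i, t in enumerate(scheduler.timesteps):
#       sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
#       for _ in range(scheduler.config.correct_steps):
#           score = score_model(sample, sigma_t).sample
#           sample = scheduler.step_correct(score, sample).prev_sample
#       score = score_model(sample, sigma_t).sample
#       sample = scheduler.step_pred(score, t, sample).prev_sample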
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
# NOTE: these intentionally shadow the built-ins; they time the corresponding
# datasets.Dataset operations via the @get_duration decorator.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(RESULTS_FILE_PATH, "wb") as f:
            f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )
            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers + min_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
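# Usage sketch for this ONNX config (illustrative; the checkpoint name is the
# one referenced above, and the call pattern is a sketch, not an exported recipe):
#
#   config = BlenderbotSmallConfig.from_pretrained("facebook/blenderbot_small-90M")
#   onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework="pt")
#   # `dummy` then contains input_ids / attention_mask / decoder_* tensors with
#   # the dynamic axes declared by `onnx_config.inputs`.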
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'
_DESCRIPTION = '\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'
_KWARGS_DESCRIPTION = '\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute exact match and per-question macro-F1 for MultiRC predictions."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]"
            )
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
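# Typical non-test usage of TextIteratorStreamer (a sketch mirroring the tests
# above; the checkpoint name is just an example):
#
#   tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
#   model = AutoModelForCausalLM.from_pretrained("distilgpt2")
#   inputs = tokenizer("Hello", return_tensors="pt")
#   streamer = TextIteratorStreamer(tokenizer)
#   Thread(target=model.generate, kwargs=dict(**inputs, max_new_tokens=20, streamer=streamer)).start()
#   for chunk in streamer:
#       print(chunk, end="")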
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
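# Spot-checks (not in the original script) read directly from the tables above: a 3-layer
# student of a 12-layer teacher copies layers [0, 6, 11] and, for intermediate
# supervision, is matched against teacher layers [3, 7, 11].
assert pick_layers_to_copy(n_student=3, n_teacher=12) == [0, 6, 11]
assert get_layers_to_supervise(n_student=3, n_teacher=12) == [3, 7, 11]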
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
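# Hypothetical CLI sketch (script name, model and path are illustrative): with `fire`,
# the function arguments above become command-line arguments, e.g.
#   python make_student.py sshleifer/bart-tiny-random student_dir --e 1 --d 1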
| 280 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
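# Quick check (not in the original module): trim_batch drops the columns that contain
# only the pad id, so a batch padded with 0 shrinks to its true width.
def _example_trim_batch():
    ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
    assert trim_batch(ids, pad_token_id=0).shape == (2, 2)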
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)
    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]
    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
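# Quick checks (not in the original module): the metric helpers above are pure string
# functions, so they can be exercised without a model or dataset.
def _example_metrics():
    assert normalize_answer("The  Cat!") == "cat"
    assert exact_match_score("An Apple", "apple") is True
    assert abs(f1_score("new york city", "york city") - 0.8) < 1e-9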
| 362 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)

    return key
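# Sanity check (not in the original script): rename_key is a pure string mapping, so it
# can be verified without loading any checkpoint.
def _example_rename_key():
    assert rename_key("visual_encoder.blocks.0.attn.qkv.weight") == (
        "vision_model.encoder.layers.0.self_attn.qkv.weight"
    )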
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
lowerCAmelCase__ :int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase__ :List[Any] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 185 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """
        Convert a (generated) token sequence into an ordered JSON format.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 214 |
'''simple docstring'''
def solution(limit: int = 1000000) -> int:
    """
    Returns the number of reduced proper fractions with denominator d <= limit,
    i.e. the sum of Euler's totient phi(d) for 2 <= d <= limit (Project Euler 72).
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime: sieve-update phi for all its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
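# Spot-check (not in the original solution): Project Euler 72 states there are 21 reduced
# proper fractions for d <= 8.
assert solution(8) == 21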
if __name__ == "__main__":
print(solution())
| 346 | 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
a__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
a__ : Any = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
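# Hypothetical invocation (script name and paths are illustrative):
#   python convert_hifigan.py --checkpoint_path generator.ckpt --stats_path stats.npy \
#       --pytorch_dump_folder_path speecht5_hifigan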
| 243 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device

        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 243 | 1 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b  # compare everything except the names

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
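# Illustration (not in the original script): a hand-built node shows what
# _node_replace_input_with does to an input name. Uses only onnx.helper.
def _example_node_replace():
    from onnx import helper

    node = helper.make_node("Add", inputs=["x", "dup_w"], outputs=["y"])
    _node_replace_input_with(node, "dup_w", "w")
    assert list(node.input) == ["x", "w"]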
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    """
    onnx_file_path: path of the onnx model file to optimize
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
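# Hypothetical usage (the path is illustrative): deduplicating shared initializers in an
# exported model writes an "optimized_" copy next to the input file.
#   optimized_path = remove_dup_initializers("model.onnx")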
| 303 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a [`MraModel`].
    """

    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
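# Usage sketch (not in the original file): MraConfig behaves like any PretrainedConfig
# subclass -- instantiate with defaults and override individual fields.
def _example_mra_config():
    config = MraConfig(num_hidden_layers=6)
    assert config.model_type == "mra" and config.num_hidden_layers == 6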
| 303 | 1 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """
    Construct an XLNet tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 357 |
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )
    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 188 | 0 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
A_ = Accelerator()
A_ = (accelerator.state.process_index + 2, 10)
A_ = torch.randint(0, 10, shape).to(accelerator.device)
A_ = ''''''
A_ = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
A_ = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
A_ = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
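
# Minimal standalone sketch of the pad_across_processes behaviour verified above:
# each rank holds a tensor whose first dimension differs, and Accelerate zero-pads
# it up to the largest first dimension found across ranks. Run under a launcher
# such as `accelerate launch` for this to be meaningful on more than one process.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
local = torch.ones(accelerator.process_index + 1, 2, device=accelerator.device)
padded = accelerator.pad_across_processes(local, dim=0, pad_index=0)
print(accelerator.process_index, padded.shape)  # same first dimension on every rank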
| 64 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working, simple example of using Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training; it builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A_ = 16
A_ = 32
def UpperCAmelCase__ (snake_case__ : Accelerator , snake_case__ : int = 16 ):
"""simple docstring"""
_snake_case : Optional[Any] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_snake_case : Any = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(snake_case__ : Any ):
# max_length=None => use the model max length (it's actually the default)
_snake_case : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=snake_case__ , max_length=snake_case__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_snake_case : List[Any] = datasets.map(
snake_case__ , batched=snake_case__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_snake_case : int = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(snake_case__ : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_snake_case : Optional[int] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_snake_case : str = 16
elif accelerator.mixed_precision != "no":
_snake_case : Optional[int] = 8
else:
_snake_case : Optional[int] = None
return tokenizer.pad(
snake_case__ , padding="""longest""" , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_snake_case : Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
_snake_case : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A_ = mocked_dataloaders # noqa: F811
def UpperCAmelCase__ (snake_case__ : List[Any] , snake_case__ : Any ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , snake_case__ ) == "1":
_snake_case : List[Any] = 2
# Initialize accelerator
_snake_case : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_snake_case : Tuple = config["""lr"""]
_snake_case : str = int(config["""num_epochs"""] )
_snake_case : Union[str, Any] = int(config["""seed"""] )
_snake_case : Union[str, Any] = int(config["""batch_size"""] )
_snake_case : List[str] = evaluate.load("""glue""" , """mrpc""" )
# New Code #
# We can now define an inner training loop function. It should take a batch size as its only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=snake_case__ )
def inner_training_loop(snake_case__ : Union[str, Any] ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(snake_case__ )
# Instantiate the model (we build the model here so that the seed also controls new weight initialization)
_snake_case : List[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=snake_case__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_snake_case : Tuple = model.to(accelerator.device )
# Instantiate optimizer
_snake_case : str = AdamW(params=model.parameters() , lr=snake_case__ )
_snake_case , _snake_case : Optional[int] = get_dataloaders(snake_case__ , snake_case__ )
# Instantiate scheduler
_snake_case : str = get_linear_schedule_with_warmup(
optimizer=snake_case__ , num_warmup_steps=1_00 , num_training_steps=(len(snake_case__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case : List[str] = accelerator.prepare(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Now we train the model
for epoch in range(snake_case__ ):
model.train()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_snake_case : int = model(**snake_case__ )
_snake_case : str = outputs.loss
accelerator.backward(snake_case__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(snake_case__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_snake_case : int = model(**snake_case__ )
_snake_case : Optional[Any] = outputs.logits.argmax(dim=-1 )
_snake_case , _snake_case : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=snake_case__ , references=snake_case__ , )
_snake_case : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , snake_case__ )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def UpperCAmelCase__ ():
"""simple docstring"""
_snake_case : Any = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=snake_case__ , default=snake_case__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
_snake_case : Dict = parser.parse_args()
_snake_case : int = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(snake_case__ , snake_case__ )
if __name__ == "__main__":
main()
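
# Standalone sketch of the @find_executable_batch_size pattern used above: the
# decorated function receives the batch size as its first argument and is retried
# with a halved value whenever it raises a CUDA out-of-memory error. The training
# body below is a placeholder.
from accelerate.utils import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"attempting training with batch_size={batch_size}")
    # ... build the dataloaders with `batch_size` and run the training loop here ...

train()  # called with no arguments; the decorator supplies batch_size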
| 64 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Any = tempfile.mkdtemp()
# fmt: off
_snake_case : Any = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_snake_case : Union[str, Any] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
_snake_case : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
_snake_case : Tuple = {'unk_token': '<unk>'}
_snake_case : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase ) )
_snake_case : Union[str, Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
_snake_case : int = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase_ ( self : List[Any] , **UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **UpperCamelCase )
def UpperCamelCase_ ( self : Any , **UpperCamelCase : Tuple ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] , **UpperCamelCase : Any ):
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_snake_case : Any = [Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : int = self.get_tokenizer()
_snake_case : List[str] = self.get_rust_tokenizer()
_snake_case : Dict = self.get_image_processor()
_snake_case : int = OwlViTProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
_snake_case : str = OwlViTProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
_snake_case : str = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_snake_case : Any = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_snake_case : Optional[int] = self.get_image_processor(do_normalize=UpperCamelCase )
_snake_case : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[int] = self.get_image_processor()
_snake_case : Tuple = self.get_tokenizer()
_snake_case : Optional[int] = OwlViTProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : List[Any] = self.prepare_image_inputs()
_snake_case : List[Any] = image_processor(UpperCamelCase , return_tensors='np' )
_snake_case : Any = processor(images=UpperCamelCase , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : Optional[int] = self.get_tokenizer()
_snake_case : Optional[int] = OwlViTProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : Optional[Any] = 'lower newer'
_snake_case : Optional[int] = processor(text=UpperCamelCase , return_tensors='np' )
_snake_case : List[Any] = tokenizer(UpperCamelCase , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_snake_case : Union[str, Any] = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : Optional[int] = OwlViTProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : Tuple = 'lower newer'
_snake_case : Union[str, Any] = self.prepare_image_inputs()
_snake_case : Any = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : List[str] = 'google/owlvit-base-patch32'
_snake_case : Dict = OwlViTProcessor.from_pretrained(UpperCamelCase )
_snake_case : List[Any] = ['cat', 'nasa badge']
_snake_case : Optional[int] = processor(text=UpperCamelCase )
_snake_case : Any = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_snake_case : str = 'google/owlvit-base-patch32'
_snake_case : List[Any] = OwlViTProcessor.from_pretrained(UpperCamelCase )
_snake_case : Optional[int] = [['cat', 'nasa badge'], ['person']]
_snake_case : List[str] = processor(text=UpperCamelCase )
_snake_case : Tuple = 16
_snake_case : Tuple = len(UpperCamelCase )
_snake_case : int = max([len(UpperCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_snake_case : Optional[Any] = 'google/owlvit-base-patch32'
_snake_case : Any = OwlViTProcessor.from_pretrained(UpperCamelCase )
_snake_case : Any = ['cat', 'nasa badge']
_snake_case : List[Any] = processor(text=UpperCamelCase )
_snake_case : List[Any] = 16
_snake_case : Tuple = inputs['input_ids']
_snake_case : Tuple = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : int = self.get_image_processor()
_snake_case : Dict = self.get_tokenizer()
_snake_case : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : str = self.prepare_image_inputs()
_snake_case : Optional[int] = self.prepare_image_inputs()
_snake_case : str = processor(images=UpperCamelCase , query_images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_snake_case : int = self.get_image_processor()
_snake_case : List[str] = self.get_tokenizer()
_snake_case : List[Any] = OwlViTProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
_snake_case : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_snake_case : Optional[int] = processor.batch_decode(UpperCamelCase )
_snake_case : List[Any] = tokenizer.batch_decode(UpperCamelCase )
self.assertListEqual(UpperCamelCase , UpperCamelCase )
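
# Usage sketch for the processor combination tested above (CLIP tokenizer +
# OwlViT image processor) applied to zero-shot object detection. Assumes network
# access for the checkpoint and the sample COCO image.
import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(list(inputs.keys()))  # ['input_ids', 'attention_mask', 'pixel_values']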
| 260 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase_ ( lowerCAmelCase: List[str] , lowerCAmelCase: int , lowerCAmelCase: List[Any] )-> Dict:
# Initialise PyTorch model
_snake_case : Dict = RemBertConfig.from_json_file(lowerCAmelCase )
print('Building PyTorch model from configuration: {}'.format(str(lowerCAmelCase ) ) )
_snake_case : Optional[Any] = RemBertModel(lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Save pytorch-model
print('Save PyTorch model to {}'.format(lowerCAmelCase ) )
torch.save(model.state_dict() , lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase_ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
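
# Example invocation of the conversion script above (all paths are placeholders
# to be replaced with a real TensorFlow RemBERT checkpoint and config):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin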
| 260 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
__lowercase : Optional[int] = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCAmelCase_ ):
def __init__( self , *__a , **__a ):
'''simple docstring'''
warnings.warn(
'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use DeiTImageProcessor instead.' , __a , )
super().__init__(*__a , **__a )
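
# Migration sketch for the deprecation above: new code should construct the image
# processor directly rather than going through the feature-extractor shim. The
# checkpoint name is just a commonly used public DeiT model.
from transformers import DeiTImageProcessor

image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")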
| 27 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A__ : Any =logging.get_logger(__name__)
A__ : List[Any] ='''▁'''
A__ : Optional[int] ={'''vocab_file''': '''sentencepiece.bpe.model'''}
A__ : Union[str, Any] ={
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
A__ : Dict ={
'''facebook/nllb-200-distilled-600M''': 10_24,
}
# fmt: off
A__ : Union[str, Any] =['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCAmelCase ( snake_case_ ):
_lowercase: int = VOCAB_FILES_NAMES
_lowercase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase: Dict = PRETRAINED_VOCAB_FILES_MAP
_lowercase: str = ['''input_ids''', '''attention_mask''']
_lowercase: List[int] = []
_lowercase: List[int] = []
def __init__( self : int , __snake_case : Optional[Any] , __snake_case : Dict="<s>" , __snake_case : Optional[int]="</s>" , __snake_case : Dict="</s>" , __snake_case : str="<s>" , __snake_case : Optional[int]="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Union[str, Any]="<mask>" , __snake_case : List[Any]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Optional[Dict[str, Any]] = None , __snake_case : str=None , __snake_case : str=False , **__snake_case : List[Any] , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase = legacy_behaviour
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , tokenizer_file=__snake_case , src_lang=__snake_case , tgt_lang=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__snake_case , **__snake_case , )
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__snake_case ) )
_lowerCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 tokens
_lowerCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase = 1
_lowerCAmelCase = len(self.sp_model )
_lowerCAmelCase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__snake_case )
}
_lowerCAmelCase = {v: k for k, v in self.lang_code_to_id.items()}
_lowerCAmelCase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
_lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowerCAmelCase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
_lowerCAmelCase = src_lang if src_lang is not None else """eng_Latn"""
_lowerCAmelCase = self.lang_code_to_id[self._src_lang]
_lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : List[str] ) -> List[str]:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
_lowerCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , __snake_case : Optional[Any] ) -> Dict:
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase = {}
_lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def lowercase__ ( self : List[Any] ) -> Any:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase__ ( self : int ) -> str:
return self._src_lang
@src_lang.setter
def lowercase__ ( self : Dict , __snake_case : str ) -> None:
_lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowercase__ ( self : List[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
_lowerCAmelCase = [1] * len(self.prefix_tokens )
_lowerCAmelCase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase__ ( self : Optional[Any] , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ) -> List[int]:
_lowerCAmelCase = [self.sep_token_id]
_lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self : List[str] , __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Optional[str] , __snake_case : Optional[str] , **__snake_case : Optional[int] ) -> Dict:
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase = src_lang
_lowerCAmelCase = self(__snake_case , add_special_tokens=__snake_case , return_tensors=__snake_case , **__snake_case )
_lowerCAmelCase = self.convert_tokens_to_ids(__snake_case )
_lowerCAmelCase = tgt_lang_id
return inputs
def lowercase__ ( self : List[Any] ) -> Optional[int]:
_lowerCAmelCase = {self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase__ ( self : Optional[int] , __snake_case : str ) -> List[str]:
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def lowercase__ ( self : Optional[Any] , __snake_case : Union[str, Any] ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase = self.sp_model.PieceToId(__snake_case )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase__ ( self : List[Any] , __snake_case : Union[str, Any] ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase__ ( self : Optional[int] , __snake_case : Union[str, Any] ) -> str:
_lowerCAmelCase = """""".join(__snake_case ).replace(__snake_case , """ """ ).strip()
return out_string
def lowercase__ ( self : str , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__snake_case ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase = os.path.join(
__snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(__snake_case , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (out_vocab_file,)
def lowercase__ ( self : Optional[Any] , __snake_case : List[str] , __snake_case : str = "eng_Latn" , __snake_case : Optional[List[str]] = None , __snake_case : str = "fra_Latn" , **__snake_case : Optional[int] , ) -> BatchEncoding:
_lowerCAmelCase = src_lang
_lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def lowercase__ ( self : str ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase__ ( self : Dict ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase__ ( self : str , __snake_case : int ) -> None:
_lowerCAmelCase = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
def lowercase__ ( self : Any , __snake_case : str ) -> None:
_lowerCAmelCase = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_lowerCAmelCase = []
_lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase = [self.cur_lang_code]
_lowerCAmelCase = [self.eos_token_id]
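
# Usage sketch for the tokenizer above. The src_lang/tgt_lang codes select the
# language-specific special tokens built by set_src_lang_special_tokens /
# set_tgt_lang_special_tokens; passing text_target also produces labels.
from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
batch = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")
print(batch["input_ids"], batch["labels"])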
| 70 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def _lowerCAmelCase ( A__: Optional[int] , A__: Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = tmp_path_factory.mktemp('''dset_infos_dir''' )
if "full:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''---\ndataset_info:\n dataset_size: 42\n---''' )
if "empty:README.md" in files:
with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f:
f.write('''''' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f:
f.write('''{"default": {"dataset_size": 42}}''' )
UpperCAmelCase = DatasetInfosDict.from_directory(A__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def _lowerCAmelCase ( A__: Union[str, Any] , A__: DatasetInfo ):
'''simple docstring'''
UpperCAmelCase = str(A__ )
dataset_info.write_to_directory(A__ )
UpperCAmelCase = DatasetInfo.from_directory(A__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(A__ , '''dataset_info.json''' ) )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = DatasetInfo(
description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
UpperCAmelCase = dataset_info._to_yaml_dict()
assert sorted(A__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
UpperCAmelCase = yaml.safe_dump(A__ )
UpperCAmelCase = yaml.safe_load(A__ )
assert dataset_info_yaml_dict == reloaded
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = DatasetInfo()
UpperCAmelCase = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def _lowerCAmelCase ( A__: Optional[Any] , A__: DatasetInfosDict ):
'''simple docstring'''
UpperCAmelCase = str(A__ )
dataset_infos_dict.write_to_directory(A__ )
UpperCAmelCase = DatasetInfosDict.from_directory(A__ )
# the config_name keys of the dataset_infos_dict take precedence over the config_name attribute
for config_name, dataset_info in dataset_infos_dict.items():
UpperCAmelCase = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
UpperCAmelCase = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(A__ , '''README.md''' ) )
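
# Minimal sketch of the YAML round trip checked above: a DatasetInfo is reduced
# to its YAML representation (the dataset card header) and rebuilt from it. Note
# that _to_yaml_dict/_from_yaml_dict are internal datasets helpers, mirrored
# directly from the assertions above.
from datasets.info import DatasetInfo

info = DatasetInfo(dataset_size=42)
restored = DatasetInfo._from_yaml_dict(info._to_yaml_dict())
assert restored.dataset_size == 42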
| 354 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["BeitFeatureExtractor"]
__magic_name__ = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
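
# From the user's side the lazy module above is transparent: importing a symbol
# from transformers only triggers the heavyweight import on first attribute access.
from transformers import BeitConfig, BeitModel  # resolved lazily by _LazyModule

model = BeitModel(BeitConfig())  # randomly initialised BEiT encoder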
| 152 | 0 |
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE :int = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Any = """align_text_model"""
def __init__( self : str , snake_case_ : Any=3_0_5_2_2 , snake_case_ : Dict=7_6_8 , snake_case_ : Union[str, Any]=1_2 , snake_case_ : Dict=1_2 , snake_case_ : Optional[int]=3_0_7_2 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[str]=0.1 , snake_case_ : Tuple=0.1 , snake_case_ : Dict=5_1_2 , snake_case_ : Optional[Any]=2 , snake_case_ : int=0.0_2 , snake_case_ : Dict=1e-12 , snake_case_ : List[Any]=0 , snake_case_ : Optional[Any]="absolute" , snake_case_ : Dict=True , **snake_case_ : Dict , ):
super().__init__(**snake_case_ )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = pad_token_id
@classmethod
def lowercase ( cls : Optional[int] , snake_case_ : Union[str, os.PathLike] , **snake_case_ : Optional[int] ):
cls._set_token_in_kwargs(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
_UpperCAmelCase = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = """align_vision_model"""
def __init__( self : int , snake_case_ : int = 3 , snake_case_ : int = 6_0_0 , snake_case_ : float = 2.0 , snake_case_ : float = 3.1 , snake_case_ : int = 8 , snake_case_ : List[int] = [3, 3, 5, 3, 5, 5, 3] , snake_case_ : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , snake_case_ : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , snake_case_ : List[int] = [] , snake_case_ : List[int] = [1, 2, 2, 2, 1, 2, 1] , snake_case_ : List[int] = [1, 2, 2, 3, 3, 4, 1] , snake_case_ : List[int] = [1, 6, 6, 6, 6, 6, 6] , snake_case_ : float = 0.2_5 , snake_case_ : str = "swish" , snake_case_ : int = 2_5_6_0 , snake_case_ : str = "mean" , snake_case_ : float = 0.0_2 , snake_case_ : float = 0.0_0_1 , snake_case_ : float = 0.9_9 , snake_case_ : float = 0.2 , **snake_case_ : Dict , ):
super().__init__(**snake_case_ )
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = width_coefficient
_UpperCAmelCase = depth_coefficient
_UpperCAmelCase = depth_divisor
_UpperCAmelCase = kernel_sizes
_UpperCAmelCase = in_channels
_UpperCAmelCase = out_channels
_UpperCAmelCase = depthwise_padding
_UpperCAmelCase = strides
_UpperCAmelCase = num_block_repeats
_UpperCAmelCase = expand_ratios
_UpperCAmelCase = squeeze_expansion_ratio
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = pooling_type
_UpperCAmelCase = initializer_range
_UpperCAmelCase = batch_norm_eps
_UpperCAmelCase = batch_norm_momentum
_UpperCAmelCase = drop_connect_rate
_UpperCAmelCase = sum(snake_case_ ) * 4
@classmethod
def lowercase ( cls : int , snake_case_ : Union[str, os.PathLike] , **snake_case_ : Tuple ):
cls._set_token_in_kwargs(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = cls.get_config_dict(snake_case_ , **snake_case_ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get("model_type" ) == "align":
_UpperCAmelCase = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(snake_case_ , **snake_case_ )
class A_ ( lowerCAmelCase_ ):
_lowerCamelCase : Optional[int] = """align"""
_lowerCamelCase : Any = True
def __init__( self : Tuple , snake_case_ : Dict=None , snake_case_ : Dict=None , snake_case_ : Tuple=6_4_0 , snake_case_ : List[str]=1.0 , snake_case_ : Dict=0.0_2 , **snake_case_ : Optional[int] , ):
super().__init__(**snake_case_ )
if text_config is None:
_UpperCAmelCase = {}
logger.info("text_config is None. Initializing the AlignTextConfig with default values." )
if vision_config is None:
_UpperCAmelCase = {}
logger.info("vision_config is None. Initializing the AlignVisionConfig with default values." )
_UpperCAmelCase = AlignTextConfig(**snake_case_ )
_UpperCAmelCase = AlignVisionConfig(**snake_case_ )
_UpperCAmelCase = projection_dim
_UpperCAmelCase = temperature_init_value
_UpperCAmelCase = initializer_range
@classmethod
def lowercase ( cls : Optional[Any] , snake_case_ : AlignTextConfig , snake_case_ : AlignVisionConfig , **snake_case_ : Union[str, Any] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **snake_case_ )
def lowercase ( self : List[Any] ):
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.text_config.to_dict()
_UpperCAmelCase = self.vision_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output
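
# Sketch of composing the three configs above via the from_text_vision_configs
# classmethod. The names below are the public transformers API names, not the
# obfuscated class names used in this file.
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

config = AlignConfig.from_text_vision_configs(
    AlignTextConfig(hidden_size=768), AlignVisionConfig(image_size=600)
)
print(config.text_config.hidden_size, config.vision_config.image_size)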
| 22 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 254 | 0 |
'''simple docstring'''
import argparse
__lowercase: List[Any] = "docs/source/_static/js/custom.js"
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
with open(_UpperCamelCase , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion =" ):
index += 1
UpperCamelCase__ = F'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {" ):
index += 1
# We go until the end
while not lines[index].startswith("}" ):
index += 1
# We add the new version at the end
lines[index - 1] += F' "v{version}": "v{version}",\n'
with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_UpperCamelCase )
if __name__ == "__main__":
__lowercase: List[Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
__lowercase: List[Any] = parser.parse_args()
update_custom_js(args.version) | 31 |
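
# Example invocation of the doc-version updater above (the file name and the
# version string are placeholders; run it from the repository root so that the
# relative path docs/source/_static/js/custom.js resolves):
#
#   python update_custom_js.py --version 4.28.0
#
# This points `const stableVersion` at "v4.28.0" and appends a matching entry
# to `versionMapping`.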
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power)) | 31 | 1 |
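
# Worked example of the intended behaviour of p_series above (read with the two
# parameters bound separately): for n = 5 and p = 2 the series is
#
#   1 + 1/2**2 + 1/3**2 + 1/4**2 + 1/5**2
#
# so p_series(5, 2) yields ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'];
# with p = 1 it is the (divergent) harmonic series.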
snake_case_ : List[Any] = 9.80_665
def A (__A : float , __A : float , __A : float = g ) -> float:
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
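
# Worked example for the buoyant-force helper above (Archimedes' principle,
# F = rho_fluid * g * V): an object of volume 0.5 m^3 fully submerged in water
# (rho = 1000 kg/m^3) experiences
#
#   1000 * 9.80665 * 0.5 = 4903.325 N
print(1000 * 9.80665 * 0.5)  # 4903.325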
| 51 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class __snake_case ( a , a , a , unittest.TestCase ):
UpperCAmelCase__ : List[Any] = StableDiffusionControlNetImgaImgPipeline
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
UpperCAmelCase__ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__ : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} )
UpperCAmelCase__ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0)
UpperCAmelCase_ = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0)
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ = CLIPTextModel(_snake_case)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
UpperCAmelCase_ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Any , _snake_case : Dict=0):
"""simple docstring"""
if str(_snake_case).startswith('''mps'''):
UpperCAmelCase_ = torch.manual_seed(_snake_case)
else:
UpperCAmelCase_ = torch.Generator(device=_snake_case).manual_seed(_snake_case)
UpperCAmelCase_ = 2
UpperCAmelCase_ = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=_snake_case , device=torch.device(_snake_case) , )
UpperCAmelCase_ = floats_tensor(control_image.shape , rng=random.Random(_snake_case)).to(_snake_case)
UpperCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1)[0]
UpperCAmelCase_ = Image.fromarray(np.uinta(_snake_case)).convert('''RGB''').resize((64, 64))
UpperCAmelCase_ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''image''': image,
'''control_image''': control_image,
}
return inputs
def lowerCamelCase ( self : Any):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCamelCase ( self : Any):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical(self):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
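

# Illustrative sketch (an assumption, not the library's actual implementation):
# how a (control_guidance_start, control_guidance_end) pair could translate
# into a per-step on/off schedule for a ControlNet. The multi-controlnet class
# below exercises this feature with both scalar and per-net list values.
def controlnet_keep_schedule(num_steps: int, start: float, end: float) -> list:
    """Return, for each denoising step, whether the ControlNet stays active."""
    # A step is kept when its normalized position falls inside [start, end].
    return [not (i / num_steps < start or (i + 1) / num_steps > end) for i in range(num_steps)]


# e.g. controlnet_keep_schedule(10, 0.2, 0.7) keeps steps 2..6, i.e. roughly
# the middle half of the schedule.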
class StableDiffusionMultiControlNetPipelineFastTests(PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
torch.manual_seed(0)
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)
torch.manual_seed(0)
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , )
torch.manual_seed(0)
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0)
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ = CLIPTextModel(_snake_case)
UpperCAmelCase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
UpperCAmelCase_ = {
'''unet''': unet,
'''controlnet''': controlnet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass(self):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical(self):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
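

# Hedged sketch of how a canny control image like the reference above could be
# produced from the init photo. `cv2` (opencv-python) is an assumed extra
# dependency here; this helper is illustrative and is not used by the tests.
def make_canny_control_image(init_image):
    import cv2  # assumed dependency, not imported by this test module

    arr = np.array(init_image.convert("RGB"))
    edges = cv2.Canny(arr, 100, 200)  # single-channel edge map
    return Image.fromarray(np.stack([edges] * 3, axis=-1))  # 3 channels for the pipeline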
| 51 | 1 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_snake_case = TypeVar('T')
class UpperCamelCase ( Generic[T] ):
UpperCamelCase : deque[T] # Cache store of keys
UpperCamelCase : set[T] # References of the keys in cache
UpperCamelCase : int = 10 # Maximum capacity of cache
def __init__( self : Any , UpperCAmelCase__ : int ) -> None:
_a : int = deque()
_a : Optional[Any] = set()
if not n:
_a : Tuple = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
_a : Union[str, Any] = n
def _lowercase ( self : List[str] , UpperCAmelCase__ : T ) -> None:
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_a : List[Any] = self.dq_store.pop()
self.key_reference.remove(UpperCAmelCase__ )
else:
self.dq_store.remove(UpperCAmelCase__ )
self.dq_store.appendleft(UpperCAmelCase__ )
self.key_reference.add(UpperCAmelCase__ )
def _lowercase ( self : str ) -> None:
for k in self.dq_store:
print(UpperCAmelCase__ )
def __repr__( self : int ) -> str:
return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
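

# Note on the implementation above: `refer` costs O(n) whenever it calls
# `dq_store.remove(x)`. A hedged sketch of an O(1) variant built on
# OrderedDict (illustrative only, not part of the original module):
from collections import OrderedDict


class LRUCacheOrdered:
    """O(1) LRU sketch; the most recently used key sits at the end."""

    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()

    def refer(self, key) -> None:
        if key in self.store:
            self.store.move_to_end(key)  # O(1) bump to most-recent
        elif len(self.store) == self.capacity:
            self.store.popitem(last=False)  # O(1) eviction of the least-recent key
        self.store[key] = None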
| 324 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
@property
def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
torch.manual_seed(0 )
_a : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def _lowercase ( self : Dict ) -> Dict:
_a : str = self.dummy_uncond_unet
_a : Optional[int] = KarrasVeScheduler()
_a : List[str] = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : int = torch.manual_seed(0 )
_a : List[Any] = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : Tuple = torch.manual_seed(0 )
_a : int = pipe(num_inference_steps=2 , generator=UpperCAmelCase__ , output_type="""numpy""" , return_dict=UpperCAmelCase__ )[0]
_a : int = image[0, -3:, -3:, -1]
_a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_a : str = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
def _lowercase ( self : Tuple ) -> List[str]:
_a : Optional[Any] = """google/ncsnpp-celebahq-256"""
_a : Any = UNetaDModel.from_pretrained(UpperCAmelCase__ )
_a : Dict = KarrasVeScheduler()
_a : int = KarrasVePipeline(unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
_a : Optional[int] = torch.manual_seed(0 )
_a : Tuple = pipe(num_inference_steps=20 , generator=UpperCAmelCase__ , output_type="""numpy""" ).images
_a : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_a : Optional[int] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
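

# Hedged helper showing the two output conventions the fast test above
# compares. This is based on the general diffusers pipeline contract (an
# output object with an `.images` field vs. a plain tuple), not on anything
# KarrasVe-specific.
def run_pipeline_both_ways(pipe, **kwargs):
    images_from_output = pipe(**kwargs).images  # return_dict=True is the default
    images_from_tuple = pipe(**kwargs, return_dict=False)[0]  # plain tuple, images first
    return images_from_output, images_from_tuple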
| 324 | 1 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = 'sshleifer/mar_enro_6_3_student'
class TestMbartCc25Enro(TestCasePlus):
"""simple docstring"""
    def setUp(self):
        super().setUp()
        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz' , extract_compressed_file=True , )
        self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'
@slow
@require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so the next test can be timed without download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
__lowerCamelCase : Optional[int] = {
'$MAX_LEN': 64,
'$BS': 64,
'$GAS': 1,
'$ENRO_DIR': self.data_dir,
'facebook/mbart-large-cc25': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'--learning_rate=3e-5': '--learning_rate 3e-4',
'--num_train_epochs 6': '--num_train_epochs 1',
}
# Clean up bash script
__lowerCamelCase : int = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py' )[1].strip()
__lowerCamelCase : Dict = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
for k, v in env_vars_to_replace.items():
__lowerCamelCase : Optional[int] = bash_script.replace(_a , str(_a ) )
__lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__lowerCamelCase : Tuple = f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
__lowerCamelCase : str = ['finetune.py'] + bash_script.split() + args
with patch.object(_a , 'argv' , _a ):
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
__lowerCamelCase : List[Any] = pl.Trainer.add_argparse_args(_a )
__lowerCamelCase : Optional[int] = SummarizationModule.add_model_specific_args(_a , os.getcwd() )
__lowerCamelCase : Any = parser.parse_args()
__lowerCamelCase : int = main(_a )
# Check metrics
__lowerCamelCase : Dict = load_json(model.metrics_save_path )
__lowerCamelCase : Tuple = metrics['val'][0]
__lowerCamelCase : str = metrics['val'][-1]
self.assertEqual(len(metrics['val'] ) , (args.max_epochs / args.val_check_interval) )
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] , _a )
self.assertGreater(last_step_stats['val_avg_gen_time'] , 0.01 )
        # fails if the model hangs on generate (e.g., because a bad config was saved)
        self.assertLessEqual(last_step_stats['val_avg_gen_time'] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['val_avg_bleu'] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu'] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
__lowerCamelCase : Any = os.listdir(_a )
__lowerCamelCase : List[str] = [x for x in contents if x.endswith('.ckpt' )][0]
__lowerCamelCase : str = os.path.join(args.output_dir , _a )
__lowerCamelCase : List[Any] = torch.load(_a , map_location='cpu' )
__lowerCamelCase : List[Any] = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__lowerCamelCase : Tuple = {os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
"""simple docstring"""
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
__lowerCamelCase : Tuple = f'{self.test_file_dir_str}/test_data/wmt_en_ro'
__lowerCamelCase : List[Any] = {
'--fp16_opt_level=O1': '',
'$MAX_LEN': 128,
'$BS': 16,
'$GAS': 1,
'$ENRO_DIR': data_dir,
'$m': 'sshleifer/student_marian_en_ro_6_1',
'val_check_interval=0.25': 'val_check_interval=1.0',
}
# Clean up bash script
__lowerCamelCase : Any = (
(self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py' )[1].strip()
)
__lowerCamelCase : Union[str, Any] = bash_script.replace('\\\n' , '' ).strip().replace('"$@"' , '' )
__lowerCamelCase : str = bash_script.replace('--fp16 ' , ' ' )
for k, v in env_vars_to_replace.items():
__lowerCamelCase : Any = bash_script.replace(_a , str(_a ) )
__lowerCamelCase : Union[str, Any] = self.get_auto_remove_tmp_dir()
__lowerCamelCase : Tuple = bash_script.replace('--fp16' , '' )
__lowerCamelCase : List[str] = 6
__lowerCamelCase : str = (
['distillation.py']
+ bash_script.split()
+ [
f'--output_dir={output_dir}',
'--gpus=1',
'--learning_rate=1e-3',
f'--num_train_epochs={epochs}',
'--warmup_steps=10',
'--val_check_interval=1.0',
'--do_predict',
]
)
with patch.object(_a , 'argv' , _a ):
__lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
__lowerCamelCase : int = pl.Trainer.add_argparse_args(_a )
__lowerCamelCase : List[Any] = SummarizationDistiller.add_model_specific_args(_a , os.getcwd() )
__lowerCamelCase : int = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
__lowerCamelCase : Union[str, Any] = distill_main(_a )
# Check metrics
__lowerCamelCase : Optional[int] = load_json(model.metrics_save_path )
__lowerCamelCase : Tuple = metrics['val'][0]
__lowerCamelCase : int = metrics['val'][-1]
assert len(metrics['val'] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # fails if the model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # fails if the model hangs on generate (bad saved config?)
assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'] , _a )
# check lightning ckpt can be loaded and has a reasonable statedict
__lowerCamelCase : Union[str, Any] = os.listdir(_a )
__lowerCamelCase : List[str] = [x for x in contents if x.endswith('.ckpt' )][0]
__lowerCamelCase : List[str] = os.path.join(args.output_dir , _a )
__lowerCamelCase : str = torch.load(_a , map_location='cpu' )
__lowerCamelCase : str = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
assert expected_key in ckpt["state_dict"]
assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.floataa
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__lowerCamelCase : List[Any] = {os.path.basename(_a ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['test'] ) == 1
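

# Minimal, self-contained version of the sys.argv patching pattern both tests
# above use to drive argparse-based training scripts (stdlib only; the flag
# names below are made up for the demo):
def _demo_argv_patching():
    import argparse
    import sys
    from unittest.mock import patch

    parser = argparse.ArgumentParser()
    parser.add_argument('--output_dir')
    parser.add_argument('--gpus', type=int, default=0)

    with patch.object(sys, 'argv', ['finetune.py', '--output_dir', '/tmp/out', '--gpus', '1']):
        args = parser.parse_args()  # parse_args() reads the patched sys.argv[1:]

    assert args.output_dir == '/tmp/out' and args.gpus == 1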
| 208 |
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc between x_start and x_end by treating
    the curve as a sequence of `steps` straight segments and summing their
    lengths."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
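

# Illustrative sanity check: the straight line f(x) = x from 0 to 1 has exact
# arc length sqrt(2), which the piecewise-linear approximation should
# reproduce almost exactly.
def _line_length_sanity_check() -> None:
    approx = line_length(lambda x: x, 0, 1, 100)
    assert abs(approx - math.sqrt(2)) < 1e-9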
if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 208 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the height and width expected after DetaImageProcessor resizing."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
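

# Standalone restatement of the shortest-edge logic inside get_expected_values
# above, for readers of the test (illustrative; the longest_edge cap is
# ignored because max_resolution stays below it in this tester):
def _shortest_edge_resize(height, width, shortest_edge):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge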
@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
'''simple docstring'''
    image_processing_class = DetaImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)
@property
    def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
# Initialize image_processing
_SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
# Initialize image_processing
_SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE : Dict = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
# Initialize image_processing
_SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_SCREAMING_SNAKE_CASE : Tuple = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
_SCREAMING_SNAKE_CASE : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_SCREAMING_SNAKE_CASE : Dict = json.loads(f.read() )
_SCREAMING_SNAKE_CASE : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
_SCREAMING_SNAKE_CASE : int = DetaImageProcessor()
_SCREAMING_SNAKE_CASE : List[Any] = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
_SCREAMING_SNAKE_CASE : Tuple = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
_SCREAMING_SNAKE_CASE : str = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
_SCREAMING_SNAKE_CASE : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_SCREAMING_SNAKE_CASE : Optional[int] = json.loads(f.read() )
_SCREAMING_SNAKE_CASE : int = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
_SCREAMING_SNAKE_CASE : List[str] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_SCREAMING_SNAKE_CASE : Tuple = DetaImageProcessor(format="coco_panoptic" )
_SCREAMING_SNAKE_CASE : Tuple = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
_SCREAMING_SNAKE_CASE : Dict = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
_SCREAMING_SNAKE_CASE : List[str] = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
_SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
_SCREAMING_SNAKE_CASE : Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
_SCREAMING_SNAKE_CASE : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
_SCREAMING_SNAKE_CASE : int = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 325 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    return "".join(lines[start_index:line_index])
_re_copy_warning = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(R'<FILL\s+[^>]*>')
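

# For reference, the shape of "Copied from" comments that _re_copy_warning
# matches (hedged examples, not taken from the actual codebase):
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock
#   # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->Simple
#
def _demo_copy_pattern():
    match = _re_copy_warning.search('# Copied from diffusers.models.attention.BasicTransformerBlock')
    assert match is not None
    assert match.groups()[1] == 'models.attention.BasicTransformerBlock'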
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(R"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check whether code commented as a copy in `filename` still matches the
    original; return the differences or overwrite the content depending on
    `overwrite`."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code = "".join(lines[start_index:line_index])

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 325 | 1 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def a_ ( ):
UpperCAmelCase__ = 1_0
UpperCAmelCase__ = datasets.Features(
{
'tokens': datasets.Sequence(datasets.Value('string' ) ),
'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
'answers': datasets.Sequence(
{
'text': datasets.Value('string' ),
'answer_start': datasets.Value('int32' ),
} ),
'id': datasets.Value('int64' ),
} )
UpperCAmelCase__ = datasets.Dataset.from_dict(
{
'tokens': [['foo'] * 5] * n,
'labels': [[1] * 5] * n,
'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0,
'id': list(range(lowerCamelCase ) ),
} , features=lowerCamelCase , )
return dataset
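

# Sketch of how a test would consume the session fixture above by declaring a
# parameter with its name -- assumed here to be `dataset`, the name it had
# before identifiers in this dump were mangled (illustrative only):
def _example_dataset_consumer(dataset):
    assert dataset.num_rows == 10
    assert set(dataset.column_names) == {'tokens', 'labels', 'answers', 'id'}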
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
dataset.map(cache_file_name=lowerCamelCase )
return filename
# FILE_CONTENT + files
lowerCAmelCase__ : Tuple = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt'
UpperCAmelCase__ = FILE_CONTENT
with open(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase )
return filename
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
    import bz2
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
    with bz2.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
import gzip
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
with gzip.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
        with lz4.frame.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(lowerCamelCase , 'w' ) as archive:
archive.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
import tarfile
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(lowerCamelCase , 'w' ) as f:
f.add(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
import lzma
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
with lzma.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
import zipfile
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
UpperCAmelCase__ = bytes(lowerCamelCase , 'utf-8' )
with zstd.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'file.xml'
UpperCAmelCase__ = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase )
return filename
lowerCAmelCase__ : Optional[Any] = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
lowerCAmelCase__ : str = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
lowerCAmelCase__ : Any = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
lowerCAmelCase__ : Any = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
lowerCAmelCase__ : Union[str, Any] = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def a_ ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = datasets.Dataset.from_dict(lowerCamelCase )
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(lowerCamelCase ) ) as con:
UpperCAmelCase__ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(lowerCamelCase , 'w' , newline='' ) as f:
UpperCAmelCase__ = csv.DictWriter(lowerCamelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(lowerCamelCase , 'w' , newline='' ) as f:
UpperCAmelCase__ = csv.DictWriter(lowerCamelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
    import bz2
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(lowerCamelCase , 'rb' ) as f:
UpperCAmelCase__ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCamelCase , 'wb' ) as f:
f.write(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(lowerCamelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase ) ) )
f.write(lowerCamelCase , arcname=os.path.join('main_dir' , os.path.basename(lowerCamelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
UpperCAmelCase__ = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(lowerCamelCase , 'wb' ) as f:
UpperCAmelCase__ = pq.ParquetWriter(lowerCamelCase , schema=lowerCamelCase )
UpperCAmelCase__ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase ) )] for k in DATA[0]} , schema=lowerCamelCase )
writer.write_table(lowerCamelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase__ = {'data': DATA}
with open(lowerCamelCase , 'w' ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
UpperCAmelCase__ = {'data': DATA_DICT_OF_LISTS}
with open(lowerCamelCase , 'w' ) as f:
json.dump(lowerCamelCase , lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(lowerCamelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(lowerCamelCase , 'w' ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(lowerCamelCase , 'w' ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase ):
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(lowerCamelCase , 'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
import gzip
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(lowerCamelCase , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase ):
import gzip
UpperCAmelCase__ = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(lowerCamelCase , 'rb' ) as orig_file:
with gzip.open(lowerCamelCase , 'wb' ) as zipped_file:
zipped_file.writelines(lowerCamelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
UpperCAmelCase__ = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(lowerCamelCase , 'w' ) as f:
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
f.write(lowerCamelCase , arcname=os.path.basename(lowerCamelCase ) )
return path
@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
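
# Illustrative test (an addition, not part of the original fixtures file):
# shows how one of the session fixtures above can be consumed by a test.
def test_text_path_fixture_contents(text_path):
    with open(text_path) as f:
        lines = f.read().splitlines()
    assert lines == ["0", "1", "2", "3"]  # exactly what the fixture wrote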
| 98 |
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
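
# Standalone usage sketch (an addition; mirrors the integration test above and
# requires network access to download the 400M checkpoint):
#
#   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
#   model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill")
#   inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
#   reply_ids = model.generate(inputs.input_ids)
#   print(tokenizer.batch_decode(reply_ids.numpy(), skip_special_tokens=True)[0])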
| 125 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name):
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key


def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
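
# Direct invocation sketch (an addition; equivalent to the CLI above — the
# checkpoint path is an assumption pointing at a locally downloaded .pth file):
#
#   convert_poolformer_checkpoint(
#       model_name="poolformer_s12",
#       checkpoint_path="poolformer_s12.pth.tar",
#       pytorch_dump_folder_path="./poolformer-s12-hf",
#   )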
| 370 |
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [False] * len(_UpperCamelCase )
__lowerCAmelCase = []
queue.append(_UpperCamelCase )
__lowerCAmelCase = True
while queue:
__lowerCAmelCase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCamelCase )
__lowerCAmelCase = True
__lowerCAmelCase = u
return visited[t]
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [-1] * (len(_UpperCamelCase ))
__lowerCAmelCase = 0
while bfs(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase = float("Inf" )
__lowerCAmelCase = sink
while s != source:
# Find the minimum value in select path
__lowerCAmelCase = min(_UpperCamelCase , graph[parent[s]][s] )
__lowerCAmelCase = parent[s]
max_flow += path_flow
__lowerCAmelCase = sink
while v != source:
__lowerCAmelCase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__lowerCAmelCase = parent[v]
return max_flow
A : Optional[Any] = [
[0, 1_6, 1_3, 0, 0, 0],
[0, 0, 1_0, 1_2, 0, 0],
[0, 4, 0, 0, 1_4, 0],
[0, 0, 9, 0, 0, 2_0],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
A , A : Optional[Any] = 0, 5
print(ford_fulkerson(graph, source, sink))
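
# Extra sanity check (an addition, not in the original script): a tiny network
# whose max flow from node 0 to node 3 is 5 (3 units via node 1, 2 via node 2).
tiny_graph = [
    [0, 3, 2, 0],
    [0, 0, 0, 3],
    [0, 0, 0, 2],
    [0, 0, 0, 0],
]
assert ford_fulkerson(tiny_graph, 0, 3) == 5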
| 259 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
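
# Note on the lazy-module pattern (an illustration, not part of the module):
# attribute access on the package triggers the real import, so e.g.
#
#     from transformers.models.altclip import AltCLIPProcessor
#
# only imports `processing_altclip` at that moment, keeping the top-level
# `import transformers` cheap when the torch-backed classes are never touched.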
| 11 |
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """
    Multiplication only for 2x2 matrices
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list):
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
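
    # Cross-check against numpy (an addition; assumes numpy is installed).
    # Note: strassen() pads its inputs in place, so copies are compared.
    import copy

    import numpy as np

    a = [[1, 2, 3], [4, 5, 6]]
    b = [[1, 2], [3, 4], [5, 6]]
    expected = (np.array(a) @ np.array(b)).tolist()
    assert strassen(copy.deepcopy(a), copy.deepcopy(b)) == expected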
| 124 | 0 |
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
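
# Added note (not in the original script): moving an n-disk tower takes
# exactly 2**n - 1 disk moves, which the recurrence below reproduces.
def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1


assert count_moves(4) == 2**4 - 1  # 15 moves for four disks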
| 363 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
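
# Usage sketch (an addition; "harmonai/maestro-150k" is one publicly hosted
# unconditional audio-diffusion checkpoint and is an assumption here):
#
#   import torch
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(audio_length_in_s=4.0, generator=torch.manual_seed(0)).audios
#   print(audio.shape)  # (batch, channels, samples)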
| 21 | 0 |
from collections import Counter

import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split

data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
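
# Optional cross-check with scikit-learn's own estimator (an addition, not in
# the original script; k=5 matches the default used above).
if __name__ == "__main__":
    from sklearn.neighbors import KNeighborsClassifier

    knn = KNeighborsClassifier(n_neighbors=5).fit(X_train, y_train)
    print(classes[knn.predict([[4.4, 3.1, 1.3, 1.4]])[0]])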
if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))

| 190 |
import random


def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is greater than probability probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge in from j to i, either
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
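
# Example run (an addition): a seeded 5-vertex undirected graph with edge
# probability 0.5, plus a complete graph on three vertices.
if __name__ == "__main__":
    random.seed(42)
    print(random_graph(5, 0.5))
    print(complete_graph(3))  # {0: [1, 2], 1: [0, 2], 2: [0, 1]}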
if __name__ == "__main__":
    import doctest

    doctest.testmod()

| 190 | 1 |
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image,
        mask_image,
        num_inference_steps=250,
        eta=0.0,
        jump_length=10,
        jump_n_sample=10,
        generator=None,
        output_type="pil",
        return_dict=True,
    ):
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample

            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
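
# Usage sketch (an addition; the checkpoint id below is the CelebA-HQ DDPM
# commonly paired with RePaint and is an assumption here — `original_image`
# and `mask_image` are 256x256 PIL images prepared by the caller):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   result = pipe(image=original_image, mask_image=mask_image, num_inference_steps=250).images[0]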
| 229 |
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    pass


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
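
    # O(1)-memory alternative (an addition): Floyd's tortoise-and-hare cycle
    # detection, for contrast with the quadratic visited-list scan above.
    def has_loop_floyd(head: Node) -> bool:
        slow = fast = head
        while fast and fast.next_node:
            slow = slow.next_node
            fast = fast.next_node.next_node
            if slow is fast:
                return True
        return False

    print(has_loop_floyd(root_node))  # False for the final single-node list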
| 229 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)
lowerCAmelCase: str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_1_2,
"distilbert-base-uncased-distilled-squad": 5_1_2,
"distilbert-base-cased": 5_1_2,
"distilbert-base-cased-distilled-squad": 5_1_2,
"distilbert-base-german-cased": 5_1_2,
"distilbert-base-multilingual-cased": 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

| 297 |
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")

    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
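
    # Cache behaviour demo (an addition): lru_cache memoizes every intermediate
    # result, so overlapping calls are answered from the cache.
    print(factorial(10))  # 3628800
    print(factorial.cache_info())  # hits/misses reported by functools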
| 114 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n            or scores for each vocabulary token after SoftMax.\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional stopping criteria specific kwargs.\n\n    Return:\n        `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"


class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
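
# Usage sketch (an addition; "gpt2" is just an example checkpoint id):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok("The quick brown fox", return_tensors="pt")
#   criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
#   out = model.generate(**inputs, stopping_criteria=criteria, max_length=64)
#   print(tok.decode(out[0]))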
| 371 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main)
| 343 | 0 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 88 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 92 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 92 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    # Count only the parameters that will receive gradient updates.
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2, bleu, em and loss, got {metric}. You can make your own by"
            " adding to this function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
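# Minimal wiring sketch (hypothetical values): both factories plug straight into a
# pytorch_lightning.Trainer, e.g.
#   callbacks = [get_checkpoint_callback("out", "rouge2"), get_early_stopping_callback("rouge2", patience=3)]
#   trainer = pl.Trainer(callbacks=callbacks)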
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
@rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
@rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
@rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
@rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 179 |
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBirdForQuestionAnswering with a CLS head on top for predicting the answer category."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)
    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
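# The start-token, end-token and answer-category heads are weighted equally in the
# final scalar objective above.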
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500
    block_size: int = 128  # field name reconstructed from context; the original identifier was obfuscated
    num_workers: int = 3  # likewise a reconstruction
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
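# The effective batch size therefore scales with the number of visible JAX devices
# (e.g. 8x the per-device size on a v3-8 TPU).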
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
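# Note: only complete batches are yielded; a trailing partial batch is dropped, which
# keeps the pmapped train/val steps shape-stable across iterations.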
@partial(jax.pmap ,axis_name='''batch''' )
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
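# jax.lax.pmean runs inside the pmapped step, so the reported loss and the gradients
# are already averaged over the "batch" device axis before the optimizer update.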
@partial(jax.pmap ,axis_name='''batch''' )
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
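# The mask above returns False for bias and LayerNorm scale parameters, so optax.adamw
# applies weight decay only to the remaining weight matrices.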
| 179 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": FalconModel,
"text-classification": FalconForSequenceClassification,
"text-generation": FalconForCausalLM,
"question-answering": FalconForQuestionAnswering,
"token-classification": FalconForTokenClassification,
"zero-shot": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(self.model_tester, "decoder_layers", None)
                or getattr(self.model_tester, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)
            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, EXPECTED_OUTPUT)
    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)
    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
| 351 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class _A(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
| 16 | 0 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs) -> PerceiverTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for Perceiver because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")
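    # PerceiverTokenizer operates directly on UTF-8 bytes: each id above is the raw byte
    # value shifted by the six special tokens, with [CLS] = 4 and [SEP] = 5.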
    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens)
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])), )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])), )
    def test_decode_invalid_byte_id(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")
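    # Byte 178 - 6 = 172 (0xAC) is a lone UTF-8 continuation byte, so decoding it in
    # isolation yields the Unicode replacement character.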
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
| 177 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
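# Example (Kadane's algorithm): with the default allow_empty_subarrays=False, the best
# contiguous run in [-2, 1, -3, 4, -1, 2, 1, -5, 4] is [4, -1, 2, 1], so the result is 6;
# with allow_empty_subarrays=True an all-negative input returns 0 instead.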
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 177 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, d_model=32, num_hidden_layers=2, num_attention_heads=4, ffn_dim=37, activation_function="gelu", activation_dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, num_layers=self.num_hidden_layers, attention_heads=self.num_attention_heads, ffn_dim=self.ffn_dim, activation_function=self.activation_function, activation_dropout=self.activation_dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, return_dict=True, )
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : Tuple = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : Union[str, Any] = config_and_inputs
_lowerCamelCase : Optional[int] = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9_865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
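

# Hedged usage sketch (added, not part of the original test file): the plain greedy
# generation path that test_lm_generate_xglm exercises above, runnable directly when
# TensorFlow is installed.
if __name__ == "__main__" and is_tf_available():
    sketch_tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    sketch_model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
    sketch_ids = sketch_tokenizer("The dog", return_tensors="tf").input_ids
    sketch_out = sketch_model.generate(sketch_ids, do_sample=False, num_beams=1)
    print(sketch_tokenizer.decode(sketch_out[0], skip_special_tokens=True))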
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
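

# Hedged usage sketch (added, not part of the original module): run scene-text
# recognition end to end. The checkpoint is the public MGP-STR release; the image
# path "word.png" is hypothetical.
if __name__ == "__main__" and is_torch_available():
    from PIL import Image

    from transformers import MgpstrForSceneTextRecognition

    sketch_processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
    sketch_model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
    sketch_pixels = sketch_processor(images=Image.open("word.png").convert("RGB"), return_tensors="pt").pixel_values
    sketch_outputs = sketch_model(sketch_pixels)
    print(sketch_processor.batch_decode(sketch_outputs.logits)["generated_text"])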
def exchange_sort(numbers: list[int]) -> list[int]:
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
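    # Added illustrative check (not in the original script): a fully reversed list
    # comes back in ascending order.
    assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]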
| 73 |
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
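

# Added illustrative check (not part of the original solution): on a 4x4 grid the
# largest four-in-a-row product is the bottom row, 13 * 14 * 15 * 16 = 43680.
assert largest_product([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]) == 43_680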
if __name__ == "__main__":
print(solution())
| 188 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 299 |
'''simple docstring'''
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
| 299 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
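

# Hedged usage sketch (added, not part of the original module): drive the sampler with
# a dummy score function. `dummy_score` is a hypothetical stand-in for a trained score
# network; a real pipeline would call the model here instead.
if __name__ == "__main__":
    scheduler = ScoreSdeVpScheduler()
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        dummy_score = -sample  # stands in for score_model(sample, t)
        sample, sample_mean = scheduler.step_pred(dummy_score, sample, t)
    print(sample_mean.shape)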
| 82 |
"""simple docstring"""
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir: str) -> dict:
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 224 | 0 |
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    """ArgumentParser subclass that uses type hints on dataclasses to generate arguments."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
def _lowercase ( self : Tuple , __A : DataClassType ):
if hasattr(__A , "_argument_group_name" ):
snake_case__ : List[Any] = self.add_argument_group(dtype._argument_group_name )
else:
snake_case__ : str = self
try:
snake_case__ : Dict[str, type] = get_type_hints(__A )
except NameError:
raise RuntimeError(
f'''Type resolution failed for {dtype}. Try declaring the class in global scope or '''
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)" )
except TypeError as ex:
# Remove this block when we drop Python 3.9 support
if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(__A ):
snake_case__ : Optional[Any] = ".".join(map(__A , sys.version_info[:3] ) )
raise RuntimeError(
f'''Type resolution failed for {dtype} on Python {python_version}. Try removing '''
"line of `from __future__ import annotations` which opts in union types as "
"`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
"support Python versions that lower than 3.10, you need to use "
"`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
"`X | None`." ) from ex
raise
for field in dataclasses.fields(__A ):
if not field.init:
continue
snake_case__ : Any = type_hints[field.name]
self._parse_dataclass_field(__A , __A )
def _lowercase ( self : Tuple , __A : Optional[Any]=None , __A : str=False , __A : str=True , __A : int=None , __A : Dict=None , ):
if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
snake_case__ : List[Any] = []
if args_filename:
args_files.append(Path(__A ) )
elif look_for_args_file and len(sys.argv ):
args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) )
# args files specified via command line flag should overwrite default args files so we add them last
if args_file_flag:
# Create special parser just to extract the args_file_flag values
snake_case__ : List[str] = ArgumentParser()
args_file_parser.add_argument(__A , type=__A , action="append" )
# Use only remaining args for further parsing (remove the args_file_flag)
snake_case__, snake_case__ : str = args_file_parser.parse_known_args(args=__A )
snake_case__ : List[Any] = vars(__A ).get(args_file_flag.lstrip("-" ) , __A )
if cmd_args_file_paths:
args_files.extend([Path(__A ) for p in cmd_args_file_paths] )
snake_case__ : List[str] = []
for args_file in args_files:
if args_file.exists():
file_args += args_file.read_text().split()
# in case of duplicate arguments the last one has precedence
# args specified via the command line should overwrite args from files, so we add them last
snake_case__ : Union[str, Any] = file_args + args if args is not None else file_args + sys.argv[1:]
snake_case__, snake_case__ : Any = self.parse_known_args(args=__A )
snake_case__ : Optional[int] = []
for dtype in self.dataclass_types:
snake_case__ : int = {f.name for f in dataclasses.fields(__A ) if f.init}
snake_case__ : int = {k: v for k, v in vars(__A ).items() if k in keys}
for k in keys:
delattr(__A , __A )
snake_case__ : Optional[int] = dtype(**__A )
outputs.append(__A )
if len(namespace.__dict__ ) > 0:
# additional namespace.
outputs.append(__A )
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' )
return (*outputs,)
def _lowercase ( self : Tuple , __A : Dict[str, Any] , __A : bool = False ):
snake_case__ : Optional[Any] = set(args.keys() )
snake_case__ : Optional[Any] = []
for dtype in self.dataclass_types:
snake_case__ : Optional[int] = {f.name for f in dataclasses.fields(__A ) if f.init}
snake_case__ : Any = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys() )
snake_case__ : int = dtype(**__A )
outputs.append(__A )
if not allow_extra_keys and unused_keys:
raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(__A )}''' )
return tuple(__A )
def _lowercase ( self : List[str] , __A : str , __A : bool = False ):
with open(Path(__A ) , encoding="utf-8" ) as open_json_file:
snake_case__ : Union[str, Any] = json.loads(open_json_file.read() )
snake_case__ : Optional[Any] = self.parse_dict(__A , allow_extra_keys=__A )
return tuple(__A )
def _lowercase ( self : Dict , __A : str , __A : bool = False ):
snake_case__ : Union[str, Any] = self.parse_dict(yaml.safe_load(Path(__A ).read_text() ) , allow_extra_keys=__A )
return tuple(__A )
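

# Hedged usage sketch (added, not part of the original module): declare an options
# dataclass and parse the command line into it. All field names below are illustrative.
if __name__ == "__main__":

    @dataclasses.dataclass
    class ExampleArguments:
        model_name: str = dataclasses.field(default="bert-base-uncased", metadata={"help": "Checkpoint to load."})
        batch_size: int = 8
        fp16: bool = False

    example_parser = HfArgumentParser(ExampleArguments)
    (example_args,) = example_parser.parse_args_into_dataclasses()
    print(example_args)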
| 286 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 286 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, RobertaConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def eval(**kwargs):
                return model(**kwargs)

            eval(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
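

# Hedged usage sketch (added, not part of the test file): the happy path the slow
# tests above exercise, runnable directly when flax is installed.
if __name__ == "__main__" and is_flax_available():
    sketch_config = AutoConfig.from_pretrained("bert-base-cased")
    sketch_model = FlaxAutoModel.from_pretrained("bert-base-cased")
    print(type(sketch_config).__name__, type(sketch_model).__name__)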
| 119 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__snake_case = """hf-internal-testing/tiny-random-bert"""
__snake_case = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""")
__snake_case = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
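

# Hedged usage sketch (added, not part of the test file): resolve a hub file through
# the local cache, exactly what test_cached_file checks above.
if __name__ == "__main__":
    resolved = cached_file(RANDOM_BERT, CONFIG_NAME)
    print(resolved)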
| 203 | 0 |
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False


try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs


def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError

            config_file = Config.load_yaml(resolved_config_file)

        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
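

# Illustrative example (added, not part of the original utilities): nested dicts become
# attribute-accessible Config objects.
#   cfg = Config({"model": {"hidden_size": 768}})
#   cfg.model.hidden_size  -> 768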
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")


# Hugging face functions below


def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")


def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
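

# Example (added): for a namespaced model id the non-CDN URL is
#   hf_bucket_url("facebook/bart-large", "pytorch_model.bin", use_cdn=False)
#   -> "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large/pytorch_model.bin"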
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
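

# Example (added): url_to_filename deterministically maps a URL (plus optional ETag)
# to a flat cache file name; a ".h5" suffix is preserved so framework-specific loaders
# can recognize the file type.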
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
        try:
            data = eval(data)
        except Exception:
            data = data.split("\n")
        req.close()
    return data
def __magic_name__ ( __lowerCAmelCase : Optional[Any] ) -> List[str]:
__lowerCamelCase = requests.get(__lowerCAmelCase )
__lowerCamelCase = np.array(Image.open(BytesIO(response.content ) ) )
return img
def __magic_name__ ( __lowerCAmelCase : List[Any] ) -> str:
__lowerCamelCase = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(__lowerCAmelCase )
with open(__lowerCAmelCase , '''rb''' ) as stream:
__lowerCamelCase = pkl.load(__lowerCAmelCase )
__lowerCamelCase = weights.pop('''model''' )
__lowerCamelCase = {}
for k, v in model.items():
__lowerCamelCase = torch.from_numpy(__lowerCAmelCase )
if "running_var" in k:
__lowerCamelCase = torch.tensor([0] )
__lowerCamelCase = k.replace('''running_var''' , '''num_batches_tracked''' )
__lowerCamelCase = zero
return new
def __magic_name__ ( ) -> Any:
print(f'''{os.path.abspath(os.path.join(__lowerCAmelCase , os.pardir ) )}/demo.ipynb''' )
def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
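# A minimal usage sketch for the helpers above (assumption: the sample URL is
# illustrative only and network access is available when this runs):
if __name__ == "__main__":
    sample_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures/resolve/main/img.jpg"  # hypothetical URL
    arr = img_tensorize(sample_url, input_format="RGB")  # download, BGR->RGB, then channel flip for "RGB"
    for group in chunk([arr, arr, arr], batch=2):  # yields [arr, arr] then [arr]
        print(len(group), arr.shape)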
| 365 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
@property
    def vocab_size(self) -> int:
        return len(self.encoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", R" \1", token)
        token = re.sub("(')", R" \1 ", token)
        token = re.sub(R"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(R"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
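# A minimal sketch of the module-level BPE pair helper above (assumption: run
# as a standalone snippet; the sample word is illustrative only):
if __name__ == "__main__":
    word = tuple("hello"[:-1]) + ("o</w>",)  # end-of-word marker, as prepared inside bpe()
    print(get_pairs(word))  # symbol bigrams, e.g. ('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')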
| 339 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    '''simple docstring'''
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
        print(f'Command: {cmd}')
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f'Found {torch.cuda.device_count()} devices, using 2 devices only')
        cmd = ['torchrun', f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__lowerCAmelCase : Dict =Accelerator()
__lowerCAmelCase : Any =(accelerator.state.process_index + 2, 1_0)
__lowerCAmelCase : List[Any] =torch.randint(0, 1_0, shape).to(accelerator.device)
__lowerCAmelCase : Tuple =''
__lowerCAmelCase : Union[str, Any] =accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
__lowerCAmelCase : Union[str, Any] =accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
__lowerCAmelCase : Optional[int] =accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
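# A hedged launch sketch (assumption: two CUDA devices are visible and the
# module is saved as test_multigpu.py; the file name is illustrative only):
#
#   torchrun --nproc_per_node=2 test_multigpu.py
#
# Each rank pads its (rank + 2, 10) tensor up to the largest first dimension
# across processes, so every rank ends up with tensors of equal shape.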
| 9 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
        'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTMSNModel',
        'ViTMSNForImageClassification',
        'ViTMSNPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
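# A minimal sketch of what the lazy module enables (assumption: torch is
# installed, so the modeling symbols are registered in _import_structure above):
#
#   from transformers.models.vit_msn import ViTMSNConfig, ViTMSNModel
#   model = ViTMSNModel(ViTMSNConfig())  # resolved lazily on first attribute access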
| 9 | 1 |
"""simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int):
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int):
    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int):
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
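    # A small consistency sketch (assumption: all three implementations agree;
    # for array=[1, 2, 5] and target=5 each counts 9 ordered combinations):
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )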
| 364 | """simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 321 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    '''simple docstring'''

    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    '''simple docstring'''
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    '''simple docstring'''
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root: Node | None) -> int:
    '''simple docstring'''
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    '''simple docstring'''
    output = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    '''simple docstring'''
    output = []
    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)
    populate_output(root, level)
    return output
def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    '''simple docstring'''
    output = []
    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)
    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    '''simple docstring'''
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    '''simple docstring'''
    root = make_tree()
    print(f"""In-order Traversal: {inorder(root)}""")
    print(f"""Pre-order Traversal: {preorder(root)}""")
    print(f"""Post-order Traversal: {postorder(root)}""", """\n""")
    print(f"""Height of Tree: {height(root)}""", """\n""")
    print("""Complete Level Order Traversal: """)
    print(level_order(root), """\n""")
    print("""Level-wise order Traversal: """)
    for level in range(1, height(root) + 1):
        print(f"""Level {level}:""", get_nodes_from_left_to_right(root, level=level))
    print("""\nZigZag order Traversal: """)
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 229 |
'''simple docstring'''
def solution(limit: int = 28123) -> int:
    '''simple docstring'''
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
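    # A quick sanity sketch (assumption: the facts below are standard number
    # theory): 12 is the smallest abundant number and 24 = 12 + 12 is the
    # smallest sum of two abundants, so every n <= 23 contributes to the total.
    assert solution(limit=23) == sum(range(1, 24))  # == 276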
| 229 | 1 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeffa = tf.cast(0.044715, x.dtype)
    coeffb = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeffb * (1.0 + coeffa * x * x)))
def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("""2.4"""):
    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
    """gelu""": gelu,
    """gelu_10""": gelu_10,
    """gelu_fast""": gelu_fast,
    """gelu_new""": gelu_new,
    """glu""": glu,
    """mish""": mish,
    """quick_gelu""": quick_gelu,
    """relu""": tf.keras.activations.relu,
    """sigmoid""": tf.keras.activations.sigmoid,
    """silu""": tf.keras.activations.swish,
    """swish""": tf.keras.activations.swish,
    """tanh""": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f'''function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}''')
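# A minimal usage sketch (assumption: run eagerly on a toy tensor):
if __name__ == "__main__":
    act = get_tf_activation("gelu_new")
    print(act(tf.constant([-1.0, 0.0, 1.0])).numpy())  # smooth gating around zero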
| 65 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    """simple docstring"""
    def setUp(self):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        '''simple docstring'''
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        '''simple docstring'''
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding='''max_length''', max_length=64)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = '''lower newer'''
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 65 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = """pegasus"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs, ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
    @property
    def num_attention_heads(self) -> int:
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size(self) -> int:
        """simple docstring"""
        return self.d_model
| 269 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    '''good first issue''',
    '''feature request''',
    '''wip''',
]
def main() -> None:
    '''simple docstring'''
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed" )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
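# A hedged invocation sketch (assumption: a personal access token with repo
# scope is exported first, typically by a scheduled CI job):
#
#   GITHUB_TOKEN=<token> python stale.py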
| 22 | 0 |
import argparse
import shutil
import time
from json import JSONDecodeError
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import (
    Seq2SeqDataset,
calculate_bleu,
calculate_rouge,
chunks,
lmap,
load_json,
parse_numeric_n_bool_cl_kwargs,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = getLogger(__name__)
def eval_data_dir(data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs, ):
    model_name = str(model_name)
    assert local_rank is not None
    torch.distributed.init_process_group(backend='nccl', rank=local_rank)
    save_dir = Path(save_dir)
    save_path = save_dir.joinpath(F"""rank_{local_rank}_output.json""")
    torch.cuda.set_device(local_rank)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda()
    if fp16:
        model = model.half()
    # determine if we need to increase num_beams
    use_task_specific_params(model, task)  # update config with task specific params
    num_beams = generate_kwargs.pop('num_beams', model.config.num_beams)  # AttributeError risk?
    if num_return_sequences > num_beams:
        num_beams = num_return_sequences
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""")  # if this is wrong, check config.model_type.
    if max_source_length is None:
        max_source_length = tokenizer.model_max_length
    if prefix is None:
        prefix = prefix or getattr(model.config, 'prefix', '') or ''
    ds = Seq2SeqDataset(
        tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, )
    # I set shuffle=True for a more accurate progress bar.
    # If all the longest samples are first, the prog bar estimate is too high at the beginning.
    sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True)
    data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn)
    results = []
    for batch in tqdm(data_loader):
        summaries = model.generate(
            input_ids=batch['input_ids'].to(model.device), attention_mask=batch['attention_mask'].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, )
        preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        ids = batch['ids']
        if num_return_sequences > 1:
            preds = chunks(preds, num_return_sequences)  # batch size chunks, each of size num_return_seq
        for i, pred in enumerate(preds):
            results.append({'pred': pred, 'id': ids[i].item()})
    save_json(results, save_path)
    return results, sampler.num_replicas
def run_generate():
    parser = argparse.ArgumentParser(
        epilog='Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate')
    parser.add_argument('--data_dir', type=str, help='like cnn_dm/test.source')
    parser.add_argument(
        '--model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.', default='sshleifer/distilbart-xsum-12-3', )
    parser.add_argument('--save_dir', type=str, help='where to save', default='tmp_gen')
    parser.add_argument('--max_source_length', type=int, default=None)
    parser.add_argument(
        '--type_path', type=str, default='test', help='which subset to evaluate typically train/val/test')
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')
    parser.add_argument(
        '--local_rank', type=int, default=-1, required=False, help='should be passed by distributed.launch')
    parser.add_argument(
        '--n_obs', type=int, default=None, required=False, help='How many observations. Defaults to all.')
    parser.add_argument(
        '--num_return_sequences', type=int, default=1, required=False, help='How many sequences to return')
    parser.add_argument(
        '--sync_timeout', type=int, default=600, required=False, help='How long should master process wait for other processes to finish.', )
    parser.add_argument('--src_lang', type=str, default=None, required=False)
    parser.add_argument('--tgt_lang', type=str, default=None, required=False)
    parser.add_argument(
        '--prefix', type=str, required=False, default=None, help='will be added to the begininng of src examples')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--debug', action='store_true')
    start_time = time.time()
    args, rest = parser.parse_known_args()
    generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest)
    if generate_kwargs and args.local_rank <= 0:
        print(F"""parsed the following generate kwargs: {generate_kwargs}""")
    json_save_dir = Path(args.save_dir + '_tmp')
    Path(json_save_dir).mkdir(exist_ok=True)  # this handles locking.
    intermediate_files = list(json_save_dir.glob('rank_*.json'))
    if intermediate_files:
        raise ValueError(F"""Found files at {json_save_dir} please move or remove them.""")
    # In theory, a node could finish and save before another node hits this. If this happens, we can address later.
    dataset_kwargs = {}
    if args.src_lang is not None:
        dataset_kwargs['src_lang'] = args.src_lang
    if args.tgt_lang is not None:
        dataset_kwargs['tgt_lang'] = args.tgt_lang
    Path(args.save_dir).mkdir(exist_ok=True)
    results, num_replicas = eval_data_dir(
        args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, )
    if args.local_rank <= 0:
        save_dir = Path(args.save_dir)
        save_dir.mkdir(exist_ok=True)
        partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout)
        preds = combine_partial_results(partial_results)
        if args.num_return_sequences > 1:
            save_path = save_dir.joinpath('pseudolabel_results.json')
            print(F"""Saving aggregated results at {save_path}, intermediate in {json_save_dir}/""")
            save_json(preds, save_path)
            return
        tgt_file = Path(args.data_dir).joinpath(args.type_path + '.target')
        with open(tgt_file) as f:
            labels = [x.rstrip() for x in f.readlines()][: len(preds)]
        # Calculate metrics, save metrics, and save _generations.txt
        calc_bleu = 'translation' in args.task
        score_fn = calculate_bleu if calc_bleu else calculate_rouge
        metric_name = 'bleu' if calc_bleu else 'rouge'
        metrics = score_fn(preds, labels)
        metrics['n_obs'] = len(preds)
        runtime = time.time() - start_time
        metrics['seconds_per_sample'] = round(runtime / metrics['n_obs'], 4)
        metrics['n_gpus'] = num_replicas
        # TODO(@stas00): add whatever metadata to metrics
        metrics_save_path = save_dir.joinpath(F"""{args.type_path}_{metric_name}.json""")
        save_json(metrics, metrics_save_path, indent=None)
        print(metrics)
        write_txt_file(preds, save_dir.joinpath(F"""{args.type_path}_generations.txt"""))
        if args.debug:
            write_txt_file(labels, save_dir.joinpath(F"""{args.type_path}.target"""))
        else:
            shutil.rmtree(json_save_dir)
def combine_partial_results(partial_results) -> List:
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = sorted(records, key=lambda x: x["id"])
    preds = [x['pred'] for x in records]
    return preds
def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]:
    # WAIT FOR lots of .json files
    start_wait = time.time()
    logger.info('waiting for all nodes to finish')
    json_data = None
    while (time.time() - start_wait) < timeout:
        json_files = list(save_dir.glob('rank_*.json'))
        if len(json_files) < num_replicas:
            continue
        try:
            # make sure all json files are fully saved
            json_data = lmap(load_json, json_files)
            return json_data
        except JSONDecodeError:
            continue
    else:
        raise TimeoutError('Rank 0 gave up on waiting for other processes')
    # Unreachable
# Unreachable
if __name__ == "__main__":
# Usage for MT:
run_generate()
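    # A hedged launch sketch (assumption: 8 visible GPUs and a cnn_dm-style
    # data_dir; every flag below is parsed by run_generate above):
    #
    #   torchrun --nproc_per_node=8 run_distributed_eval.py \
    #       --model_name sshleifer/distilbart-xsum-12-3 \
    #       --data_dir cnn_dm --save_dir tmp_gen --type_path test --bs 8 --fp16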
| 103 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
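# A minimal subclass sketch (assumption: the command name is illustrative and
# `parser` is the sub-parsers action that CLI entry points usually pass in):
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo")  # hypothetical sub-command
        echo_parser.add_argument("text")
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))

    def __init__(self, text):
        self.text = text

    def run(self):
        print(self.text)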
| 103 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    test_head_masking = True
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 227 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    """simple docstring"""
    def setUp(self):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        """simple docstring"""
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        """simple docstring"""
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)
        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        """simple docstring"""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 227 | 1 |
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 257 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    """simple docstring"""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework == "tf":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''')
        requires_backends(self, '''vision''')
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)
    def __call__(self, image, candidate_labels=None, **kwargs, ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''')
        if isinstance(image, (str, Image.Image)):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params
    def preprocess(self, inputs):
        image = load_image(inputs['''image'''])
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(''',''')
        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }
    def _forward(self, model_inputs):
        target_size = model_inputs.pop('''target_size''')
        candidate_label = model_inputs.pop('''candidate_label''')
        is_last = model_inputs.pop('''is_last''')
        outputs = self.model(**model_inputs)
        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output['''target_size'''])[0]
            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0])
                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result)
        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]
        return results
    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
| 257 | 1 |
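A minimal usage sketch for the pipeline in the row above; the checkpoint name, image URL, and candidate labels are illustrative assumptions rather than anything taken from the row itself.

# Hedged example: assumes network access and an OWL-ViT-style checkpoint.
from transformers import pipeline

detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # sample COCO image
    candidate_labels=["cat", "remote control"],
)
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])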
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element on the main process
    # so that padding is actually exercised.
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now this runs on only two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now this runs on only two processes.
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
| 244 |
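A worked check of the reduce_sum expectation in the test above: with two processes, rank 0 creates tensor([1., 2.]) and rank 1 creates tensor([3., 4.]) (from arange(2) + 1 + 2 * process_index), so an elementwise "sum" reduction yields [4., 6.] on every rank. The single-process sketch below just replays that arithmetic:

import torch

rank0 = torch.arange(2) + 1.0 + 2 * 0  # tensor([1., 2.])
rank1 = torch.arange(2) + 1.0 + 2 * 1  # tensor([3., 4.])
assert torch.equal(rank0 + rank1, torch.tensor([4.0, 6.0]))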
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 333 | 0 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # Sift the element at `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # Update function if a value is smaller than its parent.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 352 |
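A quick sanity check for the Prim implementation above that bypasses stdin; the 4-vertex example graph is an assumption chosen so the minimum spanning tree is easy to verify by hand (edge weights 0-1: 1, 1-2: 2, 0-2: 4, 1-3: 3).

from collections import defaultdict

adjacency_list = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4), (1, 3, 3)]:
    adjacency_list[u].append([v, w])
    adjacency_list[v].append([u, w])
print(prisms_algorithm(adjacency_list))  # [(0, 1), (1, 2), (1, 3)]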
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Returns a list of all left and right truncations of n, including n itself."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Quick filter: the first and last three digits must themselves be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Returns the first `count` truncatable primes (there are exactly eleven)."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Returns the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(F'''{sum(compute_truncated_primes(1_1)) = }''')
| 193 | 0 |
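A spot check for the helpers above: 3797 is the truncatable prime used in the Project Euler 37 statement, and every left/right truncation of it is prime (the eleven truncatable primes sum to the well-known answer 748317).

assert is_prime(3797)
assert all(is_prime(i) for i in list_truncated_nums(3797))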
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right, filling the first position with the decoder start token."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    # Replace any label-masking sentinel (-100) with the pad token id.
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| 38 |
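A worked example of the right-shift performed by shift_tokens_right above, inlined so it runs standalone (the ids and a decoder start token of 0 are illustrative):

import jax.numpy as jnp

input_ids = jnp.array([[5, 6, 7]])
shifted = jnp.zeros_like(input_ids).at[:, 1:].set(input_ids[:, :-1]).at[:, 0].set(0)
assert shifted.tolist() == [[0, 5, 6]]  # start token, then all but the last id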
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 232 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 357 |
def multiplicative_persistence(num: int) -> int:
    """
    Return the multiplicative persistence of a given number.

    https://en.wikipedia.org/wiki/Persistence_of_a_number

    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the additive persistence of a given number.

    https://en.wikipedia.org/wiki/Persistence_of_a_number

    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279 | 0 |
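Two worked examples for the persistence functions above, tracing each digit-reduction step:

assert multiplicative_persistence(39) == 3  # 39 -> 27 -> 14 -> 4
assert additive_persistence(199) == 3       # 199 -> 19 -> 10 -> 1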
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """
    Fetch GitHub info of a user using the requests module
    """
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F'''{key}: {value}''')
else:
        raise ValueError("'USER_TOKEN' field cannot be empty.")
| 261 |
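A hypothetical invocation of the helper above; the token string is a placeholder, not a real secret. Per the GitHub REST v3 docs, a successful /user response carries fields such as "login" and "id", while an invalid token yields a JSON body with a "message" field instead.

info = fetch_github_info("ghp_placeholder_token")  # placeholder token
print(info.get("login"), info.get("message"))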
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))


@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]

    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 269 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64),
            in_channels=1,
            out_channels=1,
            latent_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32),
            in_channels=1,
            out_channels=1,
            layers_per_block=2,
            block_out_channels=(128, 128),
            down_block_types=("AttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet

    @slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1],
            y_res=self.dummy_unet.config.sample_size[0],
        )

        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1],
            y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )

        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])

        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 349 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the shortest distance along the surface of an ellipsoid between
    two points on the surface of earth, given latitudes and longitudes.
    """
    # Equation parameter: flattening of the ellipsoid
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 1 |
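An illustrative call for lamberts_ellipsoidal_distance above; the coordinates (roughly San Francisco and Yosemite) are assumptions chosen for demonstration, and the expected magnitude is hedged rather than exact:

SAN_FRANCISCO = (37.774856, -122.424227)
YOSEMITE = (37.864742, -119.537521)
print(lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE))  # metres, roughly 254 km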
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """
    Returns the sum of Euler's totient phi(n) for 2 <= n <= limit, i.e. the
    number of reduced proper fractions with denominator at most `limit`.
    """
    # Sieve of Eratosthenes over the odd numbers.
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    # phi(n) = n * product over prime divisors p of (1 - 1/p)
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
| 78 |
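A worked check against the Project Euler 72 statement, which says there are 21 reduced proper fractions for denominators up to 8. The exact-arithmetic helper below is independent of the sieve, so the two values should agree:

from math import gcd


def phi_exact(n: int) -> int:
    return sum(1 for k in range(1, n + 1) if gcd(k, n) == 1)


print(sum(phi_exact(n) for n in range(2, 9)))  # 21
print(solution(8))  # should agree with the exact count above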
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 328 | 0 |
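A minimal save/resume sketch using the same Accelerate APIs the tests above exercise; the model, output directory, and checkpoint path are illustrative assumptions.

import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

model = torch.nn.Linear(1, 1)  # placeholder model
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
accelerator = Accelerator(project_dir="ckpts", project_config=project_config)  # assumed directory
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state()  # writes to ckpts/checkpoints/checkpoint_0
accelerator.load_state("ckpts/checkpoints/checkpoint_0")  # restores model, optimizer, and RNG state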
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 51 |
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __magic_name__ ( unittest.TestCase):
def __init__( self : Optional[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple=7 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Optional[int]=18 , lowerCamelCase__ : Any=30 , lowerCamelCase__ : int=400 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int=False , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Dict=[0.5, 0.5, 0.5] , lowerCamelCase__ : str=[0.5, 0.5, 0.5] , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = parent
UpperCamelCase__ : Dict = batch_size
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : int = image_size
UpperCamelCase__ : str = min_resolution
UpperCamelCase__ : str = max_resolution
UpperCamelCase__ : Tuple = do_resize
UpperCamelCase__ : str = size if size is not None else {'''height''': 18, '''width''': 20}
UpperCamelCase__ : Optional[Any] = do_thumbnail
UpperCamelCase__ : int = do_align_axis
UpperCamelCase__ : List[Any] = do_pad
UpperCamelCase__ : List[Any] = do_normalize
UpperCamelCase__ : Dict = image_mean
UpperCamelCase__ : List[Any] = image_std
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __magic_name__ ( __lowerCAmelCase , unittest.TestCase):
A: Tuple = DonutImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : str ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = DonutImageProcessingTester(self )
@property
def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_resize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''size''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_thumbnail''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_pad''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCamelCase__ , '''image_std''' ) )
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    # Method name reconstructed; the original was an intentionally empty override.
    def test_batch_feature(self):
        pass
@is_flaky()
    def test_call_pil(self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
# Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
    def test_call_numpy(self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
# Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
    def test_call_pytorch(self):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
# Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 51 | 1 |
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (a number whose only prime factors are 2, 3 and 5)."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0  # indices of the next multiplicand for 2, 3 and 5 in ugly_nums
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
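# Quick sanity check: the first ten ugly numbers are 1, 2, 3, 4, 5, 6, 8, 9, 10, 12,
# so ugly_numbers(10) should return 12.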
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(2_00) = }''')
| 27 |
import enum
import shutil
import sys
TERMINAL_WIDTH, TERMINAL_HEIGHT = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 252 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
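    # Example invocation (file names and paths are illustrative, not from the original script):
    #   python convert_swin_simmim.py --model_name swin-base-simmim-window6-192 \
    #       --checkpoint_path ./simmim_pretrain.pth --pytorch_dump_folder_path ./swin-simmim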
| 363 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(self, vocab_size=30_522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
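# Minimal usage sketch (assumes `transformers` with PyTorch is installed; not part of this file):
#   from transformers import DistilBertConfig, DistilBertModel
#   config = DistilBertConfig(n_layers=4, n_heads=8, dim=512, hidden_dim=4 * 512)
#   model = DistilBertModel(config)  # randomly initialised model with this geometry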
| 259 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )

        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)

        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=5_002, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5_002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32, project_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, vocab_size=5_002, )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)

        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 205 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum sum of non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
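# The dynamic programme above keeps only two running values, so it uses
# O(len(nums)) time and O(1) extra space.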
if __name__ == "__main__":
import doctest
doctest.testmod()
| 205 | 1 |
'''simple docstring'''
import sys
import turtle
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


def triangle(
    vertex_1: tuple[float, float],
    vertex_2: tuple[float, float],
    vertex_3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex_1[0], vertex_1[1])
    my_pen.down()
    my_pen.goto(vertex_2[0], vertex_2[1])
    my_pen.goto(vertex_3[0], vertex_3[1])
    my_pen.goto(vertex_1[0], vertex_1[1])

    if depth == 0:
        return

    triangle(vertex_1, get_mid(vertex_1, vertex_2), get_mid(vertex_1, vertex_3), depth - 1)
    triangle(vertex_2, get_mid(vertex_1, vertex_2), get_mid(vertex_2, vertex_3), depth - 1)
    triangle(vertex_3, get_mid(vertex_3, vertex_2), get_mid(vertex_3, vertex_1), depth - 1)
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'Correct format for using this script: '
'python fractals.py <int:depth_for_fractal>'
)
    my_pen = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('red')
    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
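    # Each extra level of depth triples the recursion, so the number of triangle
    # outlines drawn grows roughly as 3**depth.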
| 251 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="relu", num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
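    # Example invocation (file names and paths are illustrative, not from the original script):
    #   python convert_speech_to_text.py --fairseq_path ./s2t.pt --pytorch_dump_folder_path ./s2t-hf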
| 251 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
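# Typical ways to launch this example (script name and flags are illustrative):
#   accelerate launch this_script.py --mixed_precision fp16
#   python this_script.py --cpu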
| 216 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
                    [-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
                    [2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
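# To run just these tests with pytest (file path is illustrative):
#   python -m pytest test_modeling_mobilebert.py -k "MobileBert"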
| 216 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 5_1_2,
"albert-large-v1": 5_1_2,
"albert-xlarge-v1": 5_1_2,
"albert-xxlarge-v1": 5_1_2,
"albert-base-v2": 5_1_2,
"albert-large-v2": 5_1_2,
"albert-xlarge-v2": 5_1_2,
"albert-xxlarge-v2": 5_1_2,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
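# Minimal usage sketch (downloads from the Hugging Face Hub; not part of this file):
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   ids = tokenizer("A quick test sentence.")["input_ids"]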
| 351 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=30_522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3_072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
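# Example: `num_hidden_layers` is derived from the block layout, e.g.
#   FunnelConfig(block_sizes=[2, 2, 2]).num_hidden_layers == 6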
| 11 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(self, vocab_size=65_024, hidden_size=4_544, num_hidden_layers=32, num_attention_heads=71, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, hidden_dropout=0.0, attention_dropout=0.0, num_kv_heads=None, alibi=False, new_decoder_architecture=False, multi_query=True, parallel_attn=True, bias=False, bos_token_id=11, eos_token_id=11, **kwargs, ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
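# Minimal usage sketch (values below are illustrative; the defaults mirror a
# falcon-7b-style geometry):
#   config = FalconConfig(num_kv_heads=8)
#   assert config.head_dim == config.hidden_size // config.num_attention_heads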
| 21 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] =[
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
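
# Note (sketch): with the _LazyModule registration above, importing this package is
# cheap — the torch-backed symbols such as SwiftFormerModel are only imported from
# .modeling_swiftformer on first attribute access, while the config symbols resolve
# immediately under TYPE_CHECKING.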
| 41 | 0 |
import unittest
from transformers import DonutProcessor
DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
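
# Note: DonutProcessor.token2json inverts Donut's XML-like generation output
# (the `sequence` string above) back into the nested Python dict it encodes.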
| 121 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
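
# Worked example (sketch):
#   infix_2_postfix("a+b*(c^d-e)")  ->  "abcd^e-*+"
#   infix_2_prefix("a+b*(c^d-e)")   ->  "+a*b-^cde"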
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 121 | 1 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
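
# Example invocation (sketch; the script filename is hypothetical):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./unclip-image-variation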
| 11 |
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
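
# Sketch of direct use (hedged; grid/init/goal as in the demo below):
#   bfs = BreadthFirstSearch((0, 0), (6, 6))
#   path = bfs.search()  # list of (y, x) tuples, or [start] if the goal is unreachable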
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 73 | 0 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
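
# Usage sketch (hedged): the GLUE processors re-exported above are typically used as
#   processor = glue_processors["mrpc"]()
#   examples = processor.get_train_examples("/path/to/MRPC")  # data dir is a placeholder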
| 211 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    # n - 1 = d * (2 ** exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
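
# Sketch: this is probabilistic (Miller-Rabin) primality testing — e.g.
# is_prime_big(97) -> True, while is_prime_big(561) -> False with overwhelming
# probability (561 = 3 * 11 * 17 is a Carmichael number).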
if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 211 | 1 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image=None,
        strength=0.8,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
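
# Usage sketch (hedged; "google/ddpm-ema-celebahq-256" is an assumed checkpoint id —
# any pixel-space DDPM/DDIM UNet checkpoint should work, since __init__ recreates the
# scheduler via DDIMScheduler.from_config):
#   from diffusers import UNet2DModel, DDPMScheduler
#   unet = UNet2DModel.from_pretrained("google/ddpm-ema-celebahq-256")
#   scheduler = DDPMScheduler.from_pretrained("google/ddpm-ema-celebahq-256", subfolder="scheduler")
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#   images, timestep = pipe(image=pil_image, strength=0.5, return_dict=False)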
| 247 |
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
| 163 | 0 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(objs) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
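
# Usage sketch (hedged; "img.png" is a placeholder path):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"image": ["img.png"]}).cast_column("image", Image())
#   ds[0]["image"]  # decoded lazily into a PIL.Image.Image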
| 357 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
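
# Usage sketch (hedged; "document.png" is a placeholder path, and OCR requires a
# local Tesseract + pytesseract install):
#   processor = LayoutLMv2ImageProcessor()
#   encoding = processor(PIL.Image.open("document.png"), return_tensors="pt")
#   encoding.pixel_values.shape, encoding.words, encoding.boxes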
| 183 | 0 |