| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """A weighted directed edge; 0-1 BFS only allows weights 0 and 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list supporting 0-1 breadth-first search."""

    def __init__(self, size: int) -> None:
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the edges leaving the given vertex."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: weight-0 edges go to the front of the deque and weight-1
        edges to the back, so vertices are popped in distance order."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
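# --- Usage sketch (illustrative addition, not part of the original sample) ---
# Builds a small graph where a chain of 0-weight edges beats a direct
# 1-weight edge, using the AdjacencyList class defined above.
example_graph = AdjacencyList(4)
example_graph.add_edge(0, 1, 0)
example_graph.add_edge(1, 2, 0)
example_graph.add_edge(0, 2, 1)
assert example_graph.get_shortest_path(0, 2) == 0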
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class GPTBigCodeConfig(PretrainedConfig):
    """Configuration class for GPTBigCode models."""

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
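# --- Usage sketch (illustrative addition, not part of the original sample) ---
# The attribute_map lets the GPT-2-style names double as the standard
# config attribute names:
example_config = GPTBigCodeConfig(n_embd=1_024, n_layer=20)
assert example_config.hidden_size == 1_024  # aliased to n_embd
assert example_config.num_hidden_layers == 20  # aliased to n_layer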
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over the odd numbers below ``limit``."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Project Euler 50: find the prime below ``ceiling`` that can be written
    as the sum of the longest run of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
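# --- Usage sketch (illustrative addition, not part of the original sample) ---
# Below 100 the longest run is 2 + 3 + 5 + 7 + 11 + 13 = 41:
assert solution(100) == 41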
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a random PIL image (channels moved last for Image.fromarray)."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        # logits over the char / BPE / wordpiece vocabularies
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50_257)
        wp_input = torch.randn(1, 27, 30_522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
from random import shuffle

# This implementation uses the TensorFlow 1.x graph/session API (placeholders,
# Session, manual variable initialization). On TF 2.x, run it through the
# compatibility layer:
#     import tensorflow.compat.v1 as tf
#     tf.disable_v2_behavior()
import tensorflow as tf
from numpy import array


def tf_k_means_cluster(vectors, noofclusters):
    """K-means clustering of ``vectors`` into ``noofclusters`` clusters,
    expressed as a TensorFlow computation graph."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.sub was renamed to tf.subtract in TF 1.0
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()  # replaces the long-deprecated initialize_all_variables

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
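# --- Usage sketch (illustrative addition; needs a TF 1.x-style runtime) ---
#
#     import numpy as np
#     points = np.vstack([np.random.randn(20, 2) + 5, np.random.randn(20, 2) - 5])
#     centroids, assignments = tf_k_means_cluster(points, 2)
#     # `centroids` holds the two cluster centres and `assignments[i]` the
#     # cluster index of points[i].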
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''的''',
            '''价''',
            '''格''',
            '''是''',
            '''15''',
            '''便''',
            '''alex''',
            '''##andra''',
            ''',''',
            '''。''',
            '''-''',
            '''t''',
            '''shirt''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

        image_processor_map = {
            '''do_resize''': True,
            '''size''': {'''height''': 224, '''width''': 224},
            '''do_center_crop''': True,
            '''crop_size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.48145466, 0.4578275, 0.40821073],
            '''image_std''': [0.26862954, 0.26130258, 0.27577711],
            '''do_convert_rgb''': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token='''(CLS)''', sep_token='''(SEP)''')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token='''(CLS)''', sep_token='''(SEP)''', do_normalize=False)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors='''np''')
        input_processor = processor(images=image_input, return_tensors='''np''')

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''Alexandra,T-shirt的价格是15便士。'''

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = '''Alexandra,T-shirt的价格是15便士。'''
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import unittest

import numpy as np
import torch

from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import tempfile

import numpy as np
import torch

from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device

from ..test_pipelines_common import to_np


class IFPipelineTesterMixin:
    def _get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1_000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)

        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None

        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None

        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f'`{optional_component}` did not stay set to None after loading.',
            )

        inputs = self.get_dummy_inputs(torch_device)

        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }

        if image is not None:
            inputs["image"] = image

        if mask_image is not None:
            inputs["mask_image"] = mask_image

        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)

    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    ViTImageProcessor,
    ViTMAEConfig,
    ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')

require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''')


@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={'help': 'The column name of the images in the files.'}
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['val'] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}
    )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={'help': 'Whether or not to train with normalized pixel values as target.'}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mae', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        + f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}''')
    logger.info(f'''Training/evaluation parameters {training_args}''')

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
                'Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, data_files=data_args.data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'''Overriding config: {model_args.config_overrides}''')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'''New config: {config}''')

    # adapt config
    config.update(
        {
            'mask_ratio': model_args.mask_ratio,
            'norm_pix_loss': model_args.norm_pix_loss,
        })

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size['shortest_edge']
    else:
        size = (image_processor.size['height'], image_processor.size['width'])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB') if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ])

    def preprocess_images(examples):
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds['train'] if training_args.do_train else None, eval_dataset=ds['validation'] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'tasks': 'masked-auto-encoding',
        'dataset': data_args.dataset_name,
        'tags': ['masked-auto-encoding'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


lowerCAmelCase__ = logging.get_logger(__name__)


def rename_key(key):
    regex = r'\w+[.]\d+'
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, '_'.join(pat.split('.')))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the matching Flax names and reshape the
    tensor where the layouts differ (conv kernels, linear weights)."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
    if (
        any('norm' in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('scale',)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('embedding',)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('kernel',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('weight',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('bias',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split('.'))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''')

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
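# --- Usage sketch (illustrative addition; `pt_model`/`flax_model` are
# hypothetical instances of matching PyTorch and Flax model classes) ---
#
#     flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
#     # `flax_params` is a nested dict usable as the `params` argument of
#     # `flax_model.apply`.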
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    '''E''': 12.70,
    '''T''': 9.06,
    '''A''': 8.17,
    '''O''': 7.51,
    '''I''': 6.97,
    '''N''': 6.75,
    '''S''': 6.33,
    '''H''': 6.09,
    '''R''': 5.99,
    '''D''': 4.25,
    '''L''': 4.03,
    '''C''': 2.78,
    '''U''': 2.76,
    '''M''': 2.41,
    '''W''': 2.36,
    '''F''': 2.23,
    '''G''': 2.02,
    '''Y''': 1.97,
    '''P''': 1.93,
    '''B''': 1.29,
    '''V''': 0.98,
    '''K''': 0.77,
    '''J''': 0.15,
    '''X''': 0.15,
    '''Q''': 0.10,
    '''Z''': 0.07,
}
ETAOIN = '''ETAOINSHRDLCUMWFGYPBVKJXQZ'''
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''


def get_letter_count(message: str) -> dict[str, int]:
    """Count occurrences of each uppercase letter in the message."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in the message."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = ''''''.join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    """Score from 0 to 12: how many of the six most/least frequent letters in
    the message match English's most/least frequent letters."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
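# --- Usage sketch (illustrative addition, not part of the original sample) ---
sample = "Hello, world! This is a perfectly ordinary English sentence."
print(get_frequency_order(sample))       # 26 letters, most frequent first
print(english_freq_match_score(sample))  # closer to 12 => more English-like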
from dataclasses import asdict, dataclass
from typing import Optional

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    """Configuration class for ESM models."""

    model_type = """esm"""

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info('''No esmfold_config supplied for folding model, using default values.''')
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''')
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, '''use_esm_attn_map''', False):
            raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''')

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: Optional[str] = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '''`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'''
                f""" {self.sequence_state_dim} and {self.sequence_head_width}.""")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '''`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'''
                f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'''
                f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'''
                f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")

        if self.dropout >= 0.4:
            raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )
import functools
from typing import Any
def word_break(string: str , words: list[str] ) -> bool:
    '''simple docstring'''
    if not isinstance(string , str ) or len(string ) == 0:
        raise ValueError('the string should be a non-empty string' )
    if not isinstance(words , list ) or not all(
        isinstance(item , str ) and len(item ) > 0 for item in words ):
        raise ValueError('the words should be a list of non-empty strings' )
    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True
    len_string = len(string )
    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index , len_string ):
            trie_node = trie_node.get(string[i] , None )
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key , False ) and is_breakable(i + 1 ):
                return True
        return False
    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
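    # Small usage sketch for word_break above (classic word-break cases):
    assert word_break('applepenapple' , ['apple', 'pen'] )
    assert not word_break('catsandog' , ['cats', 'dog', 'sand', 'and', 'cat'] )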
| 85 |
def sylvester(number: int ) -> int:
    assert isinstance(number , int ), F"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = F"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 411 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "efficientnet"
    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )
    @property
    def atol_for_validation(self ) -> float:
        return 1e-5
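# Brief usage sketch: the defaults above mirror EfficientNet-B7, e.g.
#   config = EfficientNetConfig()
#   config.image_size         # 600
#   config.width_coefficient  # 2.0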
| 632 | """simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder(weights , model ):
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight['attention']
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights , model ):
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
        attention_weights = ly_weight['attention']
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights , model ):
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
    UpperCAmelCase__ = nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight['self_attention']
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
UpperCAmelCase__ = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args ):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['target']['decoder'] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
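# Example invocation (a sketch; the checkpoint path is a placeholder):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion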
| 632 | 1 |
"""simple docstring"""
import string
def atbash_slow(sequence: str ) -> str:
    output = ''
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            # 155 = ord('A') + ord('Z'): mirror uppercase letters
            output += chr(155 - extract )
        elif 97 <= extract <= 122:
            # 219 = ord('a') + ord('z'): mirror lowercase letters
            output += chr(219 - extract )
        else:
            output += i
    return output
def atbash(sequence: str ) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
def benchmark() -> None:
    from timeit import timeit
    print('''Running performance benchmarks...''' )
    setup = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(F'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=setup )} seconds''' )
    print(F'''> atbash(): {timeit("atbash(printable)" , setup=setup )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
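    # Involution sanity check (a small added sketch): applying atbash twice
    # returns the original text.
    assert atbash(atbash('The quick brown fox')) == 'The quick brown fox'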
| 49 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
SCREAMING_SNAKE_CASE_ = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
SCREAMING_SNAKE_CASE_ = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
SCREAMING_SNAKE_CASE_ = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE ( datasets.Metric ):
'''simple docstring'''
    def _info(self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
    def _get_feature_types(self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
    def _compute(self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """simple docstring"""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse}
| 517 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3_2 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=1_6 , lowerCAmelCase__=[3_2, 6_4, 1_2_8] , lowerCAmelCase__=[1, 2, 1] , lowerCAmelCase__=[2, 2, 4] , lowerCAmelCase__=2 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=1_0 , lowerCAmelCase__=8 , lowerCAmelCase__=["stage1", "stage2"] , lowerCAmelCase__=[1, 2] , ):
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embed_dim
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = window_size
__SCREAMING_SNAKE_CASE = mlp_ratio
__SCREAMING_SNAKE_CASE = qkv_bias
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = use_absolute_embeddings
__SCREAMING_SNAKE_CASE = patch_norm
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = encoder_stride
__SCREAMING_SNAKE_CASE = out_features
__SCREAMING_SNAKE_CASE = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
    def get_config(self):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self , config , pixel_values , labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self , config , pixel_values , labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self , config , pixel_values , labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self , config , pixel_values , labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="""FocalNet does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def check_hidden_states_output(self , inputs_dict , config , model_class , image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states) , expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states) , expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width).permute(0 , 2 , 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        inputs = image_processor(images=image , return_tensors="""pt""").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item() , 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 248 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
__magic_name__ = {
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _compare_versions(op , got_ver , want_ver , requirement , pkg , hint ):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}." )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" )
def require_version(requirement , hint = None ):
    hint = f"\n{hint}" if hint is not None else """"""
    # non-versioned check
    if re.match(r"""^[\w_\-\d]+$""" , requirement ):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , requirement )
        if not match:
            raise ValueError(
                """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"""
                f" got {requirement}" )
        pkg, want_full = match[0]
        want_range = want_full.split(""",""" )  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"""^([\s!=<>]{1,2})(.+)""" , w )
            if not match:
                raise ValueError(
                    """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"""
                    f" but got {requirement}" )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys() )}, but got {op}" )
    # special case
    if pkg == "python":
        got_ver = """.""".join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}" )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
def require_version_core(requirement ):
    hint = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"""
    return require_version(requirement , hint )
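# Usage sketches for require_version above:
#   require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")  # range with an exclusion
#   require_version("numpy")                              # presence-only check
#   require_version("python>=3.7")                        # interpreter version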
| 248 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
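# Usage sketch: the defaults match the gym-hopper checkpoints referenced above,
# e.g.
#   config = DecisionTransformerConfig()
#   (config.state_dim, config.act_dim, config.hidden_size)  # (17, 4, 128)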
| 212 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase ):
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self ):
        '''simple docstring'''
        self.model_tester = FlaxRobertaModelTester(self )
@slow
    def test_model_from_pretrained(self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''roberta-base''' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 212 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase ):
'''simple docstring'''
    def test_top_k_top_p_filtering(self ):
        """simple docstring"""
        logits = tf.convert_to_tensor(
[
[
8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0
-0.5_6_2_0_0_4_4,
5.2_3_2_2_9_7_5_2,
4.0_3_8_6_3_9_3,
-6.8_7_9_8_3_7_8,
-0.5_4_7_8_5_8_0_2,
-3.2_0_1_2_1_5_3,
2.9_2_7_7_7_1_7_6,
1.8_8_1_7_1_9_5_3,
7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9
8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10
-9.8_5_7_1_1_8_3_6,
-5.9_6_2_0_9_2_3_6,
-1.1_3_0_3_9_1_6_1,
-7.1_1_1_5_2_9_4,
-0.8_3_6_9_6_3_3,
-5.3_1_8_6_4_0_8,
7.0_6_4_2_7_4_0_7,
0.8_1_3_6_9_3_4_4,
-0.8_2_0_2_3_8_1_7,
-5.9_1_7_9_7_9_6,
0.5_8_8_1_3_4_4_3,
-6.9_9_7_7_8_4_3_8,
4.7_1_5_5_1_1_8_9,
-0.1_8_7_7_1_6_3_7,
7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25
9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26
2.1_2_6_6_2_9_4_1,
-9.3_2_5_6_2_0_3_8,
2.3_5_6_5_2_5_2_2,
], # cummulative prob of 5 highest values <= 0.6
[
0.5_8_4_2_5_5_1_8,
4.5_3_1_3_9_2_3_8,
-5.5_7_5_1_0_4_6_4,
-6.2_8_0_3_0_6_9_9,
-7.1_9_5_2_9_5_0_3,
-4.0_2_1_2_2_5_5_1,
1.3_9_3_3_7_0_3_7,
-6.0_6_7_0_7_0_5_7,
1.5_9_4_8_0_5_1_7,
-9.6_4_3_1_1_9,
0.0_3_9_0_7_7_9_9,
0.6_7_2_3_1_7_6_2,
-8.8_8_2_0_6_7_2_6,
6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13
2.2_8_5_2_0_7_2_3,
4.8_2_7_6_7_5_0_6,
4.3_0_4_2_1_3_6_8,
8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17
5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18
-4.4_7_3_5_7_9_4,
7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20
-2.9_1_0_5_1_6_6_3,
2.6_1_9_4_6_0_7_7,
-2.5_6_7_4_7_6_2,
-9.4_8_9_5_9_3_0_2,
-4.0_2_9_2_2_6_4_5,
-1.3_5_4_1_6_9_1_8,
9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 27
-5.8_9_4_7_8_5_5_3,
1.8_5_3_7_0_4_6_7,
], # cummulative prob of 5 highest values <= 0.6
            ] , dtype=tf.float32 , )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.int32 , )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.float32 , )  # expected non filtered values as noted above
        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float("inf" )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float("inf" ) , dtype=tf.float32 ) ) ) , dtype=tf.int32 , )
        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1E-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase , GenerationIntegrationTestsMixin ):
'''simple docstring'''
if is_tf_available():
        framework_dependent_parameters = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
    def test_generate_tf_function_export_fixed_input_length(self ):
        """simple docstring"""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        input_length = 2
        max_new_tokens = 2
        class DummyModel(tf.Module):
            """simple docstring"""
            def __init__(self , model ):
                """simple docstring"""
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name="attention_mask" ),
                ) , jit_compile=True , )
            def serving(self , input_ids , attention_mask ):
                """simple docstring"""
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size] ),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )["sequences"]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
    def test_generate_tf_function_export_fixed_batch_size(self ):
        """simple docstring"""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        batch_size = 1
        max_new_tokens = 2
        class DummyModel(tf.Module):
            """simple docstring"""
            def __init__(self , model ):
                """simple docstring"""
                super(DummyModel , self ).__init__()
                self.model = model
            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="attention_mask" ),
                ) , jit_compile=True , )
            def serving(self , input_ids , attention_mask ):
                """simple docstring"""
                outputs = self.model.generate(
                    input_ids=input_ids , attention_mask=attention_mask , max_new_tokens=max_new_tokens , return_dict_in_generate=True , )
                return {"sequences": outputs["sequences"]}
        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]] ),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )["sequences"]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )
@slow
@require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self ):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=tmp_dir )
            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                """simple docstring"""
                def __init__(self ):
                    """simple docstring"""
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , "spiece.model" ) , "rb" ).read() )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )
                def call(self , inputs , *args , **kwargs ):
                    """simple docstring"""
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )
            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )
    def test_eos_token_id_int_and_list_top_k_top_sampling(self ):
        """simple docstring"""
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text , return_tensors="tf" )
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
        eos_token_id = [638, 198]
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )
    def test_model_kwarg_encoder_signature_filtering(self ):
        """simple docstring"""
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article , return_tensors="tf" ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
        output = bart_model.generate(input_ids ).numpy()
        class FakeBart(TFBartForConditionalGeneration):
            """simple docstring"""
            def call(self , input_ids , foo=None , **kwargs ):
                """simple docstring"""
                return super().call(input_ids , **kwargs )
        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
        fake_output = bart_model.generate(input_ids , foo="bar" ).numpy()
        self.assertTrue(np.array_equal(output , fake_output ) )
        class FakeEncoder(bart_model.model.encoder.__class__):
            """simple docstring"""
            def call(self , input_ids , **kwargs ):
                """simple docstring"""
                return super().call(input_ids , **kwargs )
        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo="bar" )
| 721 |
from collections.abc import Callable
def bisection(function: Callable[[float], float] , a: float , b: float ) -> float:
    start: float = a
    end: float = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval." )
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float ) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
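    # Additional usage sketch: any continuous function with a sign change on
    # [a, b] works, e.g. the positive root of x**2 - 4 is approximately 2.0.
    print(bisection(lambda x: x**2 - 4, 1, 100))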
| 207 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        """simple docstring"""
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """simple docstring"""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
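
    # Worked example for the rule above (illustrative numbers): with
    # size = {"shortest_edge": 18} and a PIL input of w=30, h=400, w < h, so
    # expected_width = 18 and expected_height = int(18 * 400 / 30) = 240.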
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        """simple docstring"""
        pass
    def test_call_pil(self):
"""simple docstring"""
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , Image.Image )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
a_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
a_ = image_processing(lowerCamelCase__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
"""simple docstring"""
a_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , np.ndarray )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
a_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(lowerCamelCase__ , return_tensors="""pt""" ).pixel_values
a_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
"""simple docstring"""
a_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test not batched input
a_ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
a_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
a_ = image_processing(lowerCamelCase__ , return_tensors="""pt""" ).pixel_values
a_ = self.image_processor_tester.get_expected_values(lowerCamelCase__ , batched=lowerCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_equivalence_padding(self):
"""simple docstring"""
a_ = self.image_processing_class(**self.image_processor_dict )
a_ = self.image_processing_class(do_resize=lowerCamelCase__ , do_normalize=lowerCamelCase__ , do_rescale=lowerCamelCase__ )
# create random PyTorch tensors
a_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase__ , torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
a_ = image_processing_a.pad(lowerCamelCase__ , return_tensors="""pt""" )
a_ = image_processing_a(lowerCamelCase__ , return_tensors="""pt""" )
self.assertTrue(
torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
"""simple docstring"""
a_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
a_ = json.loads(f.read() )
a_ = {"image_id": 39_769, "annotations": target}
# encode them
a_ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" )
a_ = image_processing(images=lowerCamelCase__ , annotations=lowerCamelCase__ , return_tensors="""pt""" )
# verify pixel values
a_ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCamelCase__ )
a_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCamelCase__ , atol=1e-4 ) )
# verify area
a_ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCamelCase__ ) )
# verify boxes
a_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCamelCase__ )
a_ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCamelCase__ , atol=1e-3 ) )
# verify image_id
a_ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCamelCase__ ) )
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCamelCase__ ) )
# verify class_labels
a_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCamelCase__ ) )
# verify orig_size
a_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCamelCase__ ) )
# verify size
a_ = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCamelCase__ ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
"""simple docstring"""
a_ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
a_ = json.loads(f.read() )
a_ = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
a_ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
a_ = YolosImageProcessor(format="""coco_panoptic""" )
a_ = image_processing(images=lowerCamelCase__ , annotations=lowerCamelCase__ , masks_path=lowerCamelCase__ , return_tensors="""pt""" )
# verify pixel values
a_ = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["""pixel_values"""].shape , lowerCamelCase__ )
a_ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCamelCase__ , atol=1e-4 ) )
# verify area
a_ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCamelCase__ ) )
# verify boxes
a_ = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCamelCase__ )
a_ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCamelCase__ , atol=1e-3 ) )
# verify image_id
a_ = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCamelCase__ ) )
# verify is_crowd
a_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCamelCase__ ) )
# verify class_labels
a_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCamelCase__ ) )
# verify masks
a_ = 822_873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowerCamelCase__ )
# verify orig_size
a_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCamelCase__ ) )
# verify size
a_ = torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCamelCase__ ) ) | 483 | class TrieNode:
    def __init__(self):
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")

    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())
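
# Illustrative extension (not in the original module): collect every stored
# word under a given prefix, reusing TrieNode.nodes / TrieNode.is_leaf.
def words_with_prefix(root: TrieNode, prefix: str) -> list[str]:
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []  # no stored word starts with this prefix
        curr = curr.nodes[char]

    results: list[str] = []

    def _collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            results.append(word)
        for char, child in node.nodes.items():
            _collect(child, word + char)

    _collect(curr, prefix)
    return results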
if __name__ == "__main__":
main() | 613 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    '''simple docstring'''
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
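
# Conceptual sketch (a simplified assumption, not the actual `accelerate`
# implementation of LocalSGD): between synchronizations each worker runs plain
# local SGD, and every `local_sgd_steps` optimizer steps the parameters are
# averaged across processes, cutting communication roughly by that factor.
import torch.distributed as dist

def average_parameters_across_workers(model: torch.nn.Module, world_size: int) -> None:
    # Sum each parameter tensor over all workers, then divide by the world
    # size to obtain the element-wise average.
    for param in model.parameters():
        dist.all_reduce(param.data, op=dist.ReduceOp.SUM)
        param.data /= world_size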
| 21 |
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int):
    '''simple docstring'''

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    '''simple docstring'''
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0):
    '''simple docstring'''

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int):
    '''simple docstring'''

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
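
# Illustrative note (not in the original module): Monte Carlo error shrinks
# roughly like 1/sqrt(n), so multiplying the sample count by 100 cuts the
# expected error by about a factor of 10.
def _demo_convergence() -> None:
    for iterations in (10**2, 10**4, 10**6):
        pi_estimator(iterations)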
| 21 | 1 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """simple docstring"""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
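
# Quick sanity check (illustrative, not part of the original script): with the
# defaults mu=0, sigma=1 the density peaks at 1/sqrt(2*pi) ~= 0.3989 at x = mu.
def _demo_gaussian() -> None:
    assert abs(gaussian(0.0) - 1 / sqrt(2 * pi)) < 1e-12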
| 690 |
"""simple docstring"""
def is_balanced(s: str) -> bool:
    '''simple docstring'''
    stack: list[str] = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    '''simple docstring'''
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
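
# Illustrative checks (not part of the original script):
def _demo_is_balanced() -> None:
    assert is_balanced("{[()]}")
    assert is_balanced("")
    assert not is_balanced("{[)]}")  # mismatched closer
    assert not is_balanced("((")     # unclosed openers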
| 633 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCAmelCase__(__snake_case ) -> Optional[int]:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCamelCase__ = name.replace('''img_encoder.pos_embed''' ,'''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase__ = name.replace('''img_encoder.patch_embed.proj''' ,'''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase__ = name.replace('''img_encoder.patch_embed.norm''' ,'''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
lowerCamelCase__ = name.replace('''img_encoder.layers''' ,'''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
lowerCamelCase__ = name.replace('''blocks''' ,'''layers''' )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase__ = name.replace('''attn''' ,'''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase__ = name.replace('''proj''' ,'''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase__ = name.replace('''pre_assign_attn.attn.proj''' ,'''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
lowerCamelCase__ = name.replace('''norm1''' ,'''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase__ = name.replace('''norm2''' ,'''layer_norm2''' )
if "img_encoder.norm" in name:
lowerCamelCase__ = name.replace('''img_encoder.norm''' ,'''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase__ = name.replace('''text_encoder.token_embedding''' ,'''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
lowerCamelCase__ = name.replace('''text_encoder.positional_embedding''' ,'''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase__ = name.replace('''text_encoder.transformer.resblocks.''' ,'''text_model.encoder.layers.''' )
if "ln_1" in name:
lowerCamelCase__ = name.replace('''ln_1''' ,'''layer_norm1''' )
if "ln_2" in name:
lowerCamelCase__ = name.replace('''ln_2''' ,'''layer_norm2''' )
if "c_fc" in name:
lowerCamelCase__ = name.replace('''c_fc''' ,'''fc1''' )
if "c_proj" in name:
lowerCamelCase__ = name.replace('''c_proj''' ,'''fc2''' )
if "text_encoder" in name:
lowerCamelCase__ = name.replace('''text_encoder''' ,'''text_model''' )
if "ln_final" in name:
lowerCamelCase__ = name.replace('''ln_final''' ,'''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase__ = name.replace('''img_projector.linear_hidden.''' ,'''visual_projection.''' )
if "img_projector.linear_out." in name:
lowerCamelCase__ = name.replace('''img_projector.linear_out.''' ,'''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
lowerCamelCase__ = name.replace('''text_projector.linear_hidden''' ,'''text_projection''' )
if "text_projector.linear_out" in name:
lowerCamelCase__ = name.replace('''text_projector.linear_out''' ,'''text_projection.3''' )
return name
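
# Worked example of the key mapping the rules above encode (illustrative pair):
#   rename_key("img_encoder.layers.0.blocks.1.norm1.weight")
#   -> "vision_model.encoder.stages.0.layers.1.layer_norm1.weight"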
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Tuple:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase__ = orig_state_dict.pop(__snake_case )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase__ = key.split('''.''' )
lowerCamelCase__ , lowerCamelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCamelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase__ = val[:dim, :]
lowerCamelCase__ = val[dim : dim * 2, :]
lowerCamelCase__ = val[-dim:, :]
else:
lowerCamelCase__ = val[:dim]
lowerCamelCase__ = val[dim : dim * 2]
lowerCamelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase__ = key.split('''.''' )
lowerCamelCase__ = int(key_split[3] )
lowerCamelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase__ = val[:dim, :]
lowerCamelCase__ = val[
dim : dim * 2, :
]
lowerCamelCase__ = val[-dim:, :]
else:
lowerCamelCase__ = val[:dim]
lowerCamelCase__ = val[dim : dim * 2]
lowerCamelCase__ = val[-dim:]
else:
lowerCamelCase__ = rename_key(__snake_case )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase__ = val.squeeze_()
else:
lowerCamelCase__ = val
return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case="groupvit-gcc-yfcc" ,__snake_case=False ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = GroupViTConfig()
lowerCamelCase__ = GroupViTModel(__snake_case ).eval()
lowerCamelCase__ = torch.load(__snake_case ,map_location='''cpu''' )['''model''']
lowerCamelCase__ = convert_state_dict(__snake_case ,__snake_case )
lowerCamelCase__ , lowerCamelCase__ = model.load_state_dict(__snake_case ,strict=__snake_case )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__snake_case ) == 0)
# verify result
lowerCamelCase__ = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = processor(text=['''a photo of a cat''', '''a photo of a dog'''] ,images=__snake_case ,padding=__snake_case ,return_tensors='''pt''' )
with torch.no_grad():
lowerCamelCase__ = model(**__snake_case )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase__ = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase__ = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image ,__snake_case ,atol=1E-3 )
processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
print('''Successfully saved processor and model to''' ,__snake_case )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(__snake_case ,organization='''nielsr''' )
model.push_to_hub(__snake_case ,organization='''nielsr''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
_a = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 29 |
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    '''simple docstring'''

    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict=True):
        '''simple docstring'''
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
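
# Minimal shape-check sketch (illustrative values; the argument names follow
# the constructor above, which is an assumption of this rewrite):
#
#     model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=32)
#     frames = torch.randn(2 * 4, 32, 8, 8)     # (batch * num_frames, channels, height, width)
#     out = model(frames, num_frames=4).sample  # residual output, same shape as the input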
| 29 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, num_inference_steps: int = 2000, generator: Optional[torch.Generator] = None, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
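
# Hypothetical usage sketch (the tiny UNet configuration is illustrative, not
# taken from this module):
#
#     unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
#     scheduler = ScoreSdeVeScheduler()
#     pipe = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
#     images = pipe(batch_size=1, num_inference_steps=10).images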
| 175 |
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 175 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a : Optional[Any] = logging.get_logger(__name__)
class a_ ( _UpperCAmelCase ):
    model_input_names = ["pixel_values"]
def __init__( self : Optional[int] , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_55 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : List[str] , ) ->None:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {"""shortest_edge""": 2_56}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self : List[str] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Optional[Any] ) ->np.ndarray:
'''simple docstring'''
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , ) ->np.ndarray:
'''simple docstring'''
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : Optional[int] , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Tuple] = None ) ->int:
'''simple docstring'''
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__UpperCamelCase ):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(__UpperCamelCase ) ):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__UpperCamelCase )
_UpperCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__UpperCamelCase )
else:
_UpperCAmelCase = logits.argmax(dim=1 )
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 718 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False) -> bool:
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
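
# Example (illustrative): running `RUN_SLOW=yes pytest ...` makes the flag
# below truthy; `strtobool` accepts y/yes/t/true/on/1 and n/no/f/false/off/0.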
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def for_all_test_methods(*decorators):
    """simple docstring"""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
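
# Hypothetical usage sketch (the decorator names are illustrative stand-ins
# for the skip helpers defined above):
#
#     @for_all_test_methods(my_skip_decorator, my_other_decorator)
#     class MyDatasetTests(unittest.TestCase):
#         def test_something(self):
#             ...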
class a_ ( _UpperCAmelCase ):
pass
class a_ ( _UpperCAmelCase ):
a : Any = 0
a : Optional[Any] = 1
a : int = 2
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(_A )
yield
finally:
os.chdir(_A )
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)

def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result

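# Example usage (a sketch; the script path and arguments are hypothetical): launch a
# training script as a subprocess and fail the calling test with the captured stderr if
# the child process crashes.
#
#   cmd = [sys.executable, "examples/train.py", "--max_steps", "10"]
#   result = execute_subprocess_async(cmd, env=os.environ.copy(), timeout=180)
#   assert any("loss" in line for line in result.stdout)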
def pytest_xdist_worker_id() -> int:
    """
    Returns the numerical id of the current `pytest-xdist` worker (e.g. 0 for "gw0"),
    or 0 if `pytest-xdist` isn't being used.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)

def get_torch_dist_unique_port() -> int:
    """
    Returns a port unique to this `pytest-xdist` worker, so that concurrent tests don't
    collide on the default torch.distributed port.
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
"""simple docstring"""
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        # normalize each edge so the smaller vertex comes first
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph, creating its endpoints if needed."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        """Run Prim's algorithm to find the minimum spanning tree of the graph."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            # any weight strictly larger than every existing edge weight
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # consider only edges crossing the cut between subgraph and the rest
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph

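# A tiny worked example (illustrative): for a triangle with edge weights 1, 2 and 3, the
# minimum spanning tree keeps the two cheapest edges, so its total weight is 3.
#
#   g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#   mst = g.prims_algorithm()
#   assert sum(mst.edges.values()) == 3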
def solution(filename: str = "p107_network.txt") -> int:
    """
    Find the maximum saving achievable by removing redundant edges from the network,
    i.e. the total edge weight minus the weight of its minimum spanning tree.
    """
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]

    # only read the lower triangle; "-" marks a missing edge
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total

if __name__ == "__main__":
print(f"{solution() = }")
| 29 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

def UpperCAmelCase__ ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
| 29 | 1 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}

class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CANINE has no vocabulary file to save.
        return ()
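# Example usage (a sketch; the checkpoint name and sentence are illustrative): CANINE
# tokenizes at the Unicode codepoint level, so no vocabulary file is needed and the ids
# are wrapped in the [CLS]/[SEP] pseudo-codepoints defined above.
#
#   tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
#   enc = tokenizer("Life is like a box of chocolates.")
#   assert enc["input_ids"][0] == CLS and enc["input_ids"][-1] == SEP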
| 714 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
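# Example usage (illustrative; the 224x224 input size is an assumption for the example):
# preprocess a single RGB image into a model-ready batch. Sizes below 384 take the
# resize-then-center-crop path governed by `crop_pct`.
#
#   import numpy as np
#   processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#   image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
#   batch = processor(images=image, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 3, 224, 224)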
| 457 | 0 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)

if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 344 |
def check_cycle(graph: dict) -> bool:
    """Check whether the given directed graph contains a cycle."""
    # Keep track of all visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur for all neighbours; report True if a back edge is found."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex)
    return False

if __name__ == "__main__":
from doctest import testmod
testmod()
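# Example usage (illustrative): adjacency lists for directed graphs. The first graph
# contains the back edge 2 -> 0 and is cyclic; the second is acyclic.
#
#   cyclic = {0: [1], 1: [2], 2: [0]}
#   acyclic = {0: [1, 2], 1: [2], 2: []}
#   assert check_cycle(cyclic) is True
#   assert check_cycle(acyclic) is False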
| 587 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
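# Example usage (a sketch; "gpt2" is used only as a small, publicly available
# checkpoint): the `pipeline` factory wires this class up with a model and tokenizer.
#
#   from transformers import pipeline
#   generator = pipeline("text-generation", model="gpt2")
#   out = generator("Hello, I'm a language model,", max_new_tokens=20)
#   print(out[0]["generated_text"])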
| 701 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    """Mark the function with a single key code so it can be picked up by `register`."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    """Mark the function with several key codes so it can be picked up by `register`."""

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator

class KeyHandler(type):
    """Metaclass that collects the marked key handlers of a class into `key_handler`."""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Read one key press and dispatch to the matching handler, if any."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Apply the KeyHandler metaclass to the given class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
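# Example usage (a minimal sketch; the class and key choice are illustrative): decorate
# handler methods with @mark and convert the class with register() so that
# handle_input() dispatches key presses to them.
#
#   class Menu:
#       @mark(ord("q"))
#       def quit(cls):
#           return "quit"
#
#   Menu = register(Menu)
#   # Menu.handle_input() reads one key and calls Menu.quit when "q" is pressed.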
| 269 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
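# Example usage (a sketch; OCR requires Tesseract and the image path is illustrative):
# the processor runs the image processor (optionally with OCR) and the tokenizer in one
# call, returning input_ids, attention_mask, bbox and pixel_values.
#
#   from transformers import LayoutLMv3Processor
#   from PIL import Image
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")
#   print(encoding.keys())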
| 69 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be a integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')

    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        # controlled-phase rotations between the current qubit and the remaining ones
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    # reverse the qubit order with swaps
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=1_00_00)

    return job.result().get_counts(quantum_circuit)

if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
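# Sanity check (illustrative): applying the QFT to the all-zeros register yields an
# equal superposition, so the 2**n measured bitstrings should be roughly uniform.
#
#   counts = quantum_fourier_transform(2)
#   assert set(counts) <= {"00", "01", "10", "11"}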
| 85 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'yjernite/retribert-base-uncased': (
        'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
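# Example usage (illustrative; the values shown are the defaults defined above):
#
#   config = RetriBertConfig(projection_dim=128, share_encoders=True)
#   print(config.hidden_size, config.num_hidden_layers)  # 768 8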
| 299 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result

def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}'''
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''')

def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]

PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used

def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')

def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = full_name.split("""conv_layers.""" )[-1]
__UpperCAmelCase : Dict = name.split(""".""" )
__UpperCAmelCase : int = int(items[0] )
__UpperCAmelCase : int = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__UpperCAmelCase : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name )
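# Note on load_conv_layer above: type_id 0 entries carry a conv layer's own
# weight/bias, while type_id 2 entries carry its layer norm parameters (with
# group norm, only layer 0 has them); anything matching neither branch is
# appended to unused_weights and reported once after the conversion pass.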
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path : Dict , pytorch_dump_folder_path : Optional[Any] , config_path : Dict=None , dict_path : Optional[Any]=None , is_finetuned : List[Any]=True , is_seq_class : Dict=False ) -> List[Any]:
    '''simple docstring'''
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path )
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path )
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config )
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["""<pad>"""] = 0
            vocab_dict["""<s>"""] = 1
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaForCTC(config )
    else:
        hf_wavavec = WavaVecaForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
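# Example invocation (script name and paths are illustrative placeholders, not
# taken from the source):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path ./wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned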
| 299 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
UpperCAmelCase =logging.get_logger(__name__)
UpperCAmelCase ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase =[
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
PRETRAINED_VOCAB_FILES_MAP ={
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase ={f"""funnel-transformer/{name}""": 512 for name in _model_names}
UpperCAmelCase ={f"""funnel-transformer/{name}""": {"do_lower_case": True} for name in _model_names}
class FunnelTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id = 2
def __init__( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,lowerCamelCase_=True ,lowerCamelCase_="<unk>" ,lowerCamelCase_="<sep>" ,lowerCamelCase_="<pad>" ,lowerCamelCase_="<cls>" ,lowerCamelCase_="<mask>" ,lowerCamelCase_="<s>" ,lowerCamelCase_="</s>" ,lowerCamelCase_=True ,lowerCamelCase_=True ,lowerCamelCase_=None ,lowerCamelCase_="##" ,**lowerCamelCase_ ,) -> List[Any]:
super().__init__(
lowerCamelCase_ ,tokenizer_file=lowerCamelCase_ ,do_lower_case=lowerCamelCase_ ,unk_token=lowerCamelCase_ ,sep_token=lowerCamelCase_ ,pad_token=lowerCamelCase_ ,cls_token=lowerCamelCase_ ,mask_token=lowerCamelCase_ ,bos_token=lowerCamelCase_ ,eos_token=lowerCamelCase_ ,clean_text=lowerCamelCase_ ,tokenize_chinese_chars=lowerCamelCase_ ,strip_accents=lowerCamelCase_ ,wordpieces_prefix=lowerCamelCase_ ,**lowerCamelCase_ ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" ,lowerCamelCase_ ) != do_lower_case
            or normalizer_state.get("""strip_accents""" ,lowerCamelCase_ ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" ,lowerCamelCase_ ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=None ) -> List[Any]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(lowerCamelCase_ ,name=lowerCamelCase_ )
        return tuple(files )
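# Usage sketch (the model id comes from the pretrained map above; the token
# type behavior follows create_token_type_ids_from_sequences):
#   tok = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#   enc = tok("hello", "world")
#   # the leading <cls> gets token_type_id 2 (cls_token_type_id), the first
#   # segment gets 0, and the second segment gets 1.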
| 617 |
"""simple docstring"""
from math import pow, sqrt
def validate( *values : float ):
    """simple docstring"""
    result = len(values ) > 0 and all(value > 0.0 for value in values )
    return result
def effusion_ratio( molar_mass_a : float , molar_mass_b : float ):
    """simple docstring"""
    return (
        round(sqrt(molar_mass_b / molar_mass_a ) , 6 )
        if validate(molar_mass_a , molar_mass_b )
        else ValueError("""Input Error: Molar mass values must greater than 0.""" )
    )
def first_effusion_rate( effusion_rate : float , molar_mass_a : float , molar_mass_b : float ):
    """simple docstring"""
    return (
        round(effusion_rate * sqrt(molar_mass_b / molar_mass_a ) , 6 )
        if validate(effusion_rate , molar_mass_a , molar_mass_b )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must greater than 0.""" )
    )
def second_effusion_rate( effusion_rate : float , molar_mass_a : float , molar_mass_b : float ):
    """simple docstring"""
    return (
        round(effusion_rate / sqrt(molar_mass_b / molar_mass_a ) , 6 )
        if validate(effusion_rate , molar_mass_a , molar_mass_b )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must greater than 0.""" )
    )
def first_molar_mass( molar_mass : float , effusion_rate_a : float , effusion_rate_b : float ):
    """simple docstring"""
    return (
        round(molar_mass / pow(effusion_rate_a / effusion_rate_b , 2 ) , 6 )
        if validate(molar_mass , effusion_rate_a , effusion_rate_b )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must greater than 0.""" )
    )
def second_molar_mass( molar_mass : float , effusion_rate_a : float , effusion_rate_b : float ):
    """simple docstring"""
    return (
        round(pow(effusion_rate_a / effusion_rate_b , 2 ) / molar_mass , 6 )
        if validate(molar_mass , effusion_rate_a , effusion_rate_b )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must greater than 0.""" )
    )
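# Worked example of the relation implemented above (Graham's law of effusion):
# rate_1 / rate_2 = sqrt(M_2 / M_1). For hydrogen (M ~ 2.016 g/mol) versus
# oxygen (M ~ 31.998 g/mol), sqrt(31.998 / 2.016) ~ 3.98, so effusion_ratio
# reports that the lighter gas effuses roughly four times faster.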
| 617 | 1 |
import math
def jump_search( arr , x ) -> int:
    '''simple docstring'''
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
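# Note: jump_search assumes a sorted array. It advances in floor(sqrt(n))-sized
# blocks until the block boundary value reaches x (O(sqrt(n)) probes), then
# scans linearly inside that block. Illustrative trace: in [0, 1, 3, 5, 8, 13,
# 21] with x=8, the jumps stop once arr[5]=13 >= 8 and the scan returns index 4.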
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    x = int(input('''Enter the number to be searched:\n'''))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
        print(f'''Number {x} is at index {res}''')
| 718 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 386 | 0 |
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def UpperCAmelCase__ (file , sock ):
    '''simple docstring'''
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    file_chunks = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(file_chunks )
    # ===== invoke =====
    send_file(filename="mytext.txt" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
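# Note: the iter([1, None]) side effect above feeds the mocked file one chunk
# followed by a falsy value, which is what allows send_file's read loop to stop
# after a single conn.send, matching the assert_called_once checks.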
| 682 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any]=1_3 , UpperCAmelCase__ : int=7 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : str=True , UpperCAmelCase__ : Tuple=9_9 , UpperCAmelCase__ : Dict=3_2 , UpperCAmelCase__ : int=5 , UpperCAmelCase__ : str=4 , UpperCAmelCase__ : List[Any]=3_7 , UpperCAmelCase__ : int="gelu" , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=1_2_8 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Any=1_6 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Tuple=0.02 , UpperCAmelCase__ : str=3 , UpperCAmelCase__ : Tuple=4 , UpperCAmelCase__ : List[str]=None , ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = seq_length
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_input_mask
__SCREAMING_SNAKE_CASE = use_token_type_ids
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = num_choices
__SCREAMING_SNAKE_CASE = scope
def UpperCAmelCase_ ( self : str ) -> Any:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE = NezhaModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , ) -> Tuple:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = NezhaModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> int:
__SCREAMING_SNAKE_CASE = NezhaForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any ) -> Tuple:
__SCREAMING_SNAKE_CASE = NezhaForNextSentencePrediction(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> List[str]:
__SCREAMING_SNAKE_CASE = NezhaForPreTraining(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , next_sentence_label=UpperCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = NezhaForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = NezhaForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] ) -> Any:
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = NezhaForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict ) -> str:
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = NezhaForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
__SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Tuple = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : int = True
    def _prepare_for_class( self : Dict , inputs_dict : Any , model_class : int , return_labels : Union[str, Any]=False ) -> Dict:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["""next_sentence_label"""] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
def UpperCAmelCase_ ( self : List[str] ) -> List[str]:
__SCREAMING_SNAKE_CASE = NezhaModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self : List[str] ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
# This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[int] ) -> Dict:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> int:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
@slow
@require_torch_gpu
def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "bert.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "bert.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> str:
__SCREAMING_SNAKE_CASE = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
__SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase__ , atol=1E-4 ) )
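# Note: both integration tests above follow the usual pattern for pretrained
# checks: run the checkpoint on a small fixed input, verify the output shape,
# then compare a hard-coded 3x3 slice of the logits with atol=1e-4, which
# tolerates minor numerical drift across hardware.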
| 682 | 1 |
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'
_DESCRIPTION = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'
_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n'
CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    def _info( self : Optional[int] ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare( self : List[str] , dl_manager : Optional[Any] ) -> int:
"""simple docstring"""
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
            checkpoint_name ='''bleurt-base-128'''
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name =self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name =self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}" )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path =dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        self.scorer =score.BleurtScorer(os.path.join(model_path , checkpoint_name ) )
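    # Note: BLEURT is a learned regression metric, so the values returned by
    # _compute below are unbounded floats rather than probabilities; the
    # docstring example above shows scores slightly above 1 for exact matches.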
    def _compute( self : int , predictions : Optional[int] , references : List[Any] ) -> List[Any]:
        """simple docstring"""
        scores =self.scorer.score(references=references , candidates=predictions )
        return {"scores": scores}
| 710 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class CsvConfig( datasets.BuilderConfig ):
UpperCAmelCase = ","
UpperCAmelCase = None
UpperCAmelCase = "infer"
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = False
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = "."
UpperCAmelCase = None
UpperCAmelCase = '"'
UpperCAmelCase = 0
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = 0
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = None
UpperCAmelCase = 10000
UpperCAmelCase = None
UpperCAmelCase = "strict"
UpperCAmelCase = "error"
UpperCAmelCase = None
    def __post_init__( self : int ) -> Union[str, Any]:
        """simple docstring"""
        if self.delimiter is not None:
            self.sep =self.delimiter
        if self.column_names is not None:
            self.names =self.column_names
    @property
    def pd_read_csv_kwargs( self : Dict ) -> List[Any]:
        """simple docstring"""
        pd_read_csv_kwargs ={
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
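# Note: pd_read_csv_kwargs above mirrors the config fields into pandas.read_csv
# keyword arguments, then prunes deprecated and version-gated parameters that
# are still at their defaults (checked against the installed pandas version),
# so a single CsvConfig definition works across pandas 1.x and 2.x releases.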
class A__ ( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info( self : Optional[int] ) -> int:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self : Dict , dl_manager : str ) -> List[str]:
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        data_files =dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files =data_files
            if isinstance(files , str ):
                files =[files]
            files =[dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits =[]
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files =[files]
            files =[dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits
return splits
    def _cast_table( self : Tuple , pa_table : pa.Table ) -> pa.Table:
        """simple docstring"""
        if self.config.features is not None:
            schema =self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table =pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table =table_cast(pa_table , schema )
        return pa_table
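    # Note: _cast_table above takes the cheap pa.Table.from_arrays path (a pure
    # column re-order) when no feature requires a storage cast, and otherwise
    # falls back to table_cast, which can convert e.g. str -> int/float or
    # str -> Audio.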
    def _generate_tables( self : str , files : Union[str, Any] ) -> int:
        """simple docstring"""
        schema =self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype =(
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader =pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table =pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e )}: {e}" )
                raise
| 191 | 0 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester( object ):
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=True , __lowercase=99 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=37 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=16 , __lowercase=2 , __lowercase=0.02 , __lowercase=3 , __lowercase=4 , __lowercase=None , ) -> int:
__UpperCamelCase :Dict = parent
__UpperCamelCase :Dict = batch_size
__UpperCamelCase :Dict = seq_length
__UpperCamelCase :Tuple = is_training
__UpperCamelCase :Any = use_input_mask
__UpperCamelCase :Union[str, Any] = use_token_type_ids
__UpperCamelCase :Optional[Any] = use_labels
__UpperCamelCase :Optional[int] = vocab_size
__UpperCamelCase :Tuple = hidden_size
__UpperCamelCase :Optional[Any] = num_hidden_layers
__UpperCamelCase :List[Any] = num_attention_heads
__UpperCamelCase :Dict = intermediate_size
__UpperCamelCase :List[Any] = hidden_act
__UpperCamelCase :Tuple = hidden_dropout_prob
__UpperCamelCase :Optional[Any] = attention_probs_dropout_prob
__UpperCamelCase :List[Any] = max_position_embeddings
__UpperCamelCase :List[str] = type_vocab_size
__UpperCamelCase :Union[str, Any] = type_sequence_label_size
__UpperCamelCase :Optional[Any] = initializer_range
__UpperCamelCase :Union[str, Any] = num_labels
__UpperCamelCase :Optional[int] = num_choices
__UpperCamelCase :int = scope
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :Dict = None
if self.use_input_mask:
__UpperCamelCase :str = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :Union[str, Any] = None
__UpperCamelCase :Union[str, Any] = None
__UpperCamelCase :Dict = None
if self.use_labels:
__UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :List[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self) -> Optional[Any]:
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Any:
__UpperCamelCase :str = DistilBertModel(config=__UpperCamelCase)
model.to(__UpperCamelCase)
model.eval()
__UpperCamelCase :Dict = model(__UpperCamelCase , __UpperCamelCase)
__UpperCamelCase :Any = model(__UpperCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> List[Any]:
__UpperCamelCase :Dict = DistilBertForMaskedLM(config=__UpperCamelCase)
model.to(__UpperCamelCase)
model.eval()
__UpperCamelCase :int = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Optional[Any]:
__UpperCamelCase :Any = DistilBertForQuestionAnswering(config=__UpperCamelCase)
model.to(__UpperCamelCase)
model.eval()
__UpperCamelCase :List[str] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Dict:
__UpperCamelCase :List[Any] = self.num_labels
__UpperCamelCase :str = DistilBertForSequenceClassification(__UpperCamelCase)
model.to(__UpperCamelCase)
model.eval()
__UpperCamelCase :List[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> List[str]:
__UpperCamelCase :Optional[Any] = self.num_labels
__UpperCamelCase :Tuple = DistilBertForTokenClassification(config=__UpperCamelCase)
model.to(__UpperCamelCase)
model.eval()
__UpperCamelCase :List[str] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Union[str, Any]:
__UpperCamelCase :Optional[int] = self.num_choices
__UpperCamelCase :List[str] = DistilBertForMultipleChoice(config=__UpperCamelCase)
model.to(__UpperCamelCase)
model.eval()
__UpperCamelCase :Any = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :str = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Optional[int] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase__ ( self) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
a__ : Optional[int] = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a__ : List[str] = True
a__ : Any = True
a__ : List[Any] = True
a__ : Optional[int] = True
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :str = DistilBertModelTester(self)
__UpperCamelCase :Dict = ConfigTester(self , config_class=__UpperCamelCase , dim=37)
def UpperCamelCase__ ( self) -> Optional[int]:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__UpperCamelCase)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCamelCase)
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCamelCase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCamelCase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCamelCase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCamelCase)
@slow
def UpperCamelCase__ ( self) -> Optional[Any]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase :Optional[int] = DistilBertModel.from_pretrained(__UpperCamelCase)
self.assertIsNotNone(__UpperCamelCase)
@slow
@require_torch_gpu
def UpperCamelCase__ ( self) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict , model_class)
            traced_model = torch.jit.trace(
                model , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , '''traced_model.pt'''))
                loaded = torch.jit.load(os.path.join(tmp , '''traced_model.pt''') , map_location=torch_device)
                loaded(inputs_dict['''input_ids'''].to(torch_device) , inputs_dict['''attention_mask'''].to(torch_device))
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Any = DistilBertModel.from_pretrained('''distilbert-base-uncased''')
__UpperCamelCase :int = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
__UpperCamelCase :List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
__UpperCamelCase :int = model(__UpperCamelCase , attention_mask=__UpperCamelCase)[0]
__UpperCamelCase :Any = torch.Size((1, 11, 768))
self.assertEqual(output.shape , __UpperCamelCase)
__UpperCamelCase :str = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4))
| 167 |
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Compute the winning cluster by Euclidean distance."""
        da = 0.0
        db = 0.0
        for i in range(len(sample)):
            da += math.pow((sample[i] - weights[0][i]), 2)
            db += math.pow((sample[i] - weights[1][i]), 2)
        # the winner is the cluster whose weight vector is closest to the sample
        return 0 if da < db else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Pull the winning weight vector towards the sample by learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')
# running the main() function
if __name__ == "__main__":
main()
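# Illustrative quick check of the winner computation — a minimal sketch assuming the
# class above, written as a doctest-style comment so the script's behaviour is unchanged:
# with one weight vector per cluster, the sample [1, 0] is closest to cluster 0.
# >>> SelfOrganizingMap().get_winner([[1.0, 0.0], [0.0, 1.0]], [1, 0])
# 0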
| 416 | 0 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError('The length of profit and weight must be same.')
    if max_weight <= 0:
        raise ValueError('max_weight must be greater than zero.')
    if any(p < 0 for p in profit):
        raise ValueError('Profit can not be negative.')
    if any(w < 0 for w in weight):
        raise ValueError('Weight can not be negative.')

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight does not reach the max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
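# Illustrative check of the greedy fractional knapsack above (doctest-style comment):
# items with profits [1, 2, 3] and weights [3, 4, 5] all fit under max_weight 15,
# so the whole profit of 6 is collected.
# >>> calc_profit([1, 2, 3], [3, 4, 5], 15)
# 6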
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
    profit = [int(x) for x in input('Input profits separated by spaces: ').split()]
    weight = [int(x) for x in input('Input weights separated by spaces: ').split()]
    max_weight = int(input('Max weight allowed: '))
    # Function Call
    calc_profit(profit, weight, max_weight)
| 706 |
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # lazily create a deque for the slot, then push the new value in front
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 110 | 0 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
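# Illustrative check (doctest-style comment; bitonic sort assumes the input
# length is a power of two, as here):
# >>> data = [12, 42, -21, 1]
# >>> bitonic_sort(data, 0, len(data), 1)
# >>> data
# [-21, 1, 12, 42]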
| 456 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("""text-to-speech""")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("""hey""")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool(text="""hey""")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 456 | 1 |
import math
def jump_search(arr: list, x: int) -> int:
    """Find the index of x in the ascending sorted list arr, or -1 if absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
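# Illustrative check (doctest-style comment; jump_search expects the input to be
# sorted in ascending order):
# >>> jump_search([0, 1, 2, 8, 13, 17, 19, 32, 42], 8)
# 3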
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
    x = int(input('''Enter the number to be searched:\n'''))
    res = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Generate a segmentation mask for `text` on the input image
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 81 | 1 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 525 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url, stream=True).raw).convert("""RGB""")
    return image


def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias')
        v_bias = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias')

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f'visual_encoder.blocks.{i}.attn.qkv.bias'] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 3_64 if """coco""" in model_name else 2_24
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-2.7b""", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("""facebook/opt-6.7b""", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("""google/flan-t5-xl""", dense_act_fn="""gelu""", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("""google/flan-t5-xxl""", dense_act_fn="""gelu""", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("""facebook/opt-2.7b""")
        if """opt""" in model_name
        else AutoTokenizer.from_pretrained("""google/flan-t5-xl""")
    )
    eos_token_id = tokenizer("""\n""", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        """blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
        """blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
        """blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
        """blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
        """blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
        """blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
        """blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("""Loading original model...""")
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device)
    original_model.eval()
    print("""Done!""")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("""Qformer.bert"""):
            key = key.replace("""Qformer.bert""", """qformer""")
        if "attention.self" in key:
            key = key.replace("""self""", """attention""")
        if "opt_proj" in key:
            key = key.replace("""opt_proj""", """language_projection""")
        if "t5_proj" in key:
            key = key.replace("""t5_proj""", """language_projection""")
        if key.startswith("""opt"""):
            key = key.replace("""opt""", """language""")
        if key.startswith("""t5"""):
            key = key.replace("""t5""", """language""")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["""eval"""](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["""\n"""], return_tensors="""pt""").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"""height""": image_size, """width""": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="""pt""").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"""image""": original_pixel_values, """text_input""": [""""""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -1_00)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("""First values of original logits:""", original_logits[0, :3, :3])
    print("""First values of HF logits:""", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("""Looks ok!""")

    print("""Generating a caption...""")
    prompt = """"""
    input_ids = tokenizer(prompt, return_tensors="""pt""").input_ids.to(device)

    original_outputs = original_model.generate({"""image""": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values, input_ids, do_sample=False, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, )
    print("""Original generation:""", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("""HF generation:""", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f'nielsr/{model_name}')
        hf_model.push_to_hub(f'nielsr/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 641 | 0 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all character n-grams of the given size from a sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
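# Illustrative check (doctest-style comment; character n-grams of size 3):
# >>> create_ngram("abcde", 3)
# ['abc', 'bcd', 'cde']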
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 720 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn Bunch into features and target arrays
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load Iris dataset
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)

    names = iris['''target_names''']

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap='''Blues''', normalize='''true''', )
    plt.title('''Normalized Confusion Matrix - IRIS Dataset''')
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
    main()
| 300 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=9_00,
        max_position_embeddings=20_48,
        encoder_layers=6,
        encoder_ffn_dim=20_48,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=10_24,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=2_56,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=3_00,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 610 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}""")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 426 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 1_00,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]

        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps, )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
| 383 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of nums."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
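# Illustrative checks (doctest-style comments): 1 + 3 is the best non-adjacent
# pick from [1, 2, 3]; 1 + 5 + 2 + ... is beaten by 5 + 7 + 6 = 18 below.
# >>> maximum_non_adjacent_sum([1, 2, 3])
# 4
# >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
# 18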
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 383 | 1 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_a : Optional[Any] = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class _lowercase ( unittest.TestCase ):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ) -> None:
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.')
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self) -> None:
        transformers_directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_examples(self) -> None:
        transformers_directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_configuration_examples(self) -> None:
        transformers_directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(transformers_directory, identifier=identifier)

    def test_no_duplicate_examples(self) -> None:
        transformers_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_documentation_examples(self) -> None:
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 56 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        vocab_tokens = [
            '''<d>''',
            '''</d>''',
            '''<s>''',
            '''</s>''',
            '''</_>''',
            '''<unk>''',
            '''<pad>''',
            '''</n>''',
            '''我''',
            '''是''',
            '''C''',
            '''P''',
            '''M''',
            '''A''',
            '''n''',
            '''t''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self) -> None:
        tokenizer = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''')
        texts = '''今天天气真好!'''
        jieba_tokens = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = '''今天天气真好!'''
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 355 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : str = logging.get_logger(__name__)
__a : Tuple = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class MraConfig(PretrainedConfig):
    model_type = 'mra'

    def __init__(
        self,
        vocab_size=5_02_65,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 701 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
__a : Optional[Any] = logging.getLogger(__name__)
def main():
    """
    Preprocess the text data so that tokenization + token_to_ids is only done once.
    """
    parser = argparse.ArgumentParser(
        description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
    parser.add_argument('''--file_path''' , type=str , default='''data/dump.txt''' , help='''The path to the data.''' )
    parser.add_argument('''--tokenizer_type''' , type=str , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
    parser.add_argument('''--tokenizer_name''' , type=str , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
    parser.add_argument('''--dump_file''' , type=str , default='''data/dump''' , help='''The dump file prefix.''' )
    args = parser.parse_args()

    logger.info(F"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token''']  # `[CLS]`
        sep = tokenizer.special_tokens_map['''sep_token''']  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''cls_token''']  # `<s>`
        sep = tokenizer.special_tokens_map['''sep_token''']  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map['''bos_token''']  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map['''eos_token''']  # `<|endoftext|>`

    logger.info(F"Loading text from {args.file_path}" )
    with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
        data = fp.readlines()

    logger.info('''Start encoding''' )
    logger.info(F"{len(data )} examples to process." )

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = F"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
    logger.info('''Finished binarization''' )
    logger.info(F"{len(data )} examples processed." )

    dp_file = F"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F"Dump to {dp_file}" )
    with open(dp_file , '''wb''' ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
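# Illustrative CLI usage — a sketch only; the input/output paths below are
# hypothetical examples, not files shipped with this script:
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text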
if __name__ == "__main__":
    main()
| 522 | 0 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("""run pip install datasets""" )
    pair = F'''{src_lang}-{tgt_lang}'''
    print(F'''Converting {dataset}-{pair}''' )
    ds = datasets.load_dataset(dataset, pair )
    if save_dir is None:
        save_dir = F'''{dataset}-{pair}'''
    save_dir = Path(save_dir )
    save_dir.mkdir(exist_ok=True )

    for split in ds.keys():
        print(F'''Splitting {split} with {ds[split].num_rows} records''' )

        # to save to val.source, val.target like summary datasets
        fn = """val""" if split == """validation""" else split

        src_path = save_dir.joinpath(F'''{fn}.source''' )
        tgt_path = save_dir.joinpath(F'''{fn}.target''' )

        src_fp = src_path.open("""w+""" )
        tgt_fp = tgt_path.open("""w+""" )

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split] ):
            ex = x["""translation"""]
            src_fp.write(ex[src_lang] + """\n""" )
            tgt_fp.write(ex[tgt_lang] + """\n""" )

    print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
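# Illustrative CLI usage via fire — a sketch only; the script filename is a
# hypothetical example:
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16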
| 654 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
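# Sanity check for the defaults above (a sketch, not part of the original file): the
# conv feature extractor downsamples raw audio by prod(conv_stride) = 5 * 2**6 = 320
# input samples per output frame, which is what the property computes.
#   config = WavLMConfig()
#   assert config.inputs_to_logits_ratio == 320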
| 654 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
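# Round-trip sketch (generic PretrainedConfig behaviour, nothing BioGPT-specific assumed):
#   config = BioGptConfig(num_hidden_layers=2)
#   config.save_pretrained("/tmp/biogpt-config")
#   assert BioGptConfig.from_pretrained("/tmp/biogpt-config").num_hidden_layers == 2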
| 124 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
# module-level determinism flag; the original name was obfuscated, the line below is
# the convention used throughout the diffusers test suite (an assumption, not verbatim)
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            generator = generator.manual_seed(0)
            new_image = pipe.dual_guided(
                prompt="first prompt",
                image=prompt_image,
                text_to_image_strength=0.75,
                generator=generator,
                guidance_scale=7.5,
                num_inference_steps=2,
                output_type="numpy",
            ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 124 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens):
    """Compute the MinHash of a code snippet; snippets shorter than MIN_NUM_TOKENS are skipped."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code):
    """Tokenize a code snippet by splitting on non-alphanumeric characters."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the MinHashLSH index; any close match either absorbs the
        key into an existing cluster or seeds a new one."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
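# Worked example for the token-level Jaccard above: "def f(x): return x" tokenizes to
# {def, f, x, return} and "def g(x): return x" to {def, g, x, return}, so the
# similarity is |{def, x, return}| / |{def, f, g, x, return}| = 3 / 5 = 0.6.
#   assert jaccard_similarity("def f(x): return x", "def g(x): return x") == 3 / 5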
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its "extremes": every file in the cluster is within
    jaccard_threshold of at least one extreme, and each extreme counts its copies."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    # the dataset is shared through a module-level global so workers don't pickle it
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
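# End-to-end sketch on a toy corpus (assumes `datasets` is installed; column names
# match what _compute_min_hash reads). Note that snippets with fewer than
# MIN_NUM_TOKENS tokens are skipped by get_min_hash, so very short files never dedup.
#   from datasets import Dataset
#   ds = Dataset.from_dict({
#       "content": ["print('a')"] * 2 + ["print('b')"],
#       "repo_name": ["r0", "r1", "r2"],
#       "path": ["a.py", "a_copy.py", "b.py"],
#   })
#   ds_dedup, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)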
| 167 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    # Doolittle LU decomposition without pivoting: table = lower @ upper,
    # with ones on the diagonal of `lower`.
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
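    # Added usage sketch: decompose a small matrix that is LU-decomposable without
    # pivoting, then verify that lower @ upper reconstructs the input.
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    assert np.allclose(lower @ upper, matrix)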
| 167 | 1 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
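# Shape intuition (toy tensors, no pretrained weights needed): for a of shape (4, 8)
# and b of shape (3, 8), cosine_distance(a, b) is a (4, 3) matrix whose entry
# (i, j) is the cosine similarity of a[i] and b[j].
#   a, b = torch.randn(4, 8), torch.randn(3, 8)
#   assert cosine_distance(a, b).shape == (4, 3)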
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 705 |
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight'))
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias'))
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight'))
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias'))
# stages
for i in range(len(config.backbone_config.depths)):
for j in range(config.backbone_config.depths[i]):
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight'''))
rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias'''))
if i < 3:
rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight'''))
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight'''))
rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias'''))
rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight'''))
rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias'''))
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight'))
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight'))
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias'))
for source_index, target_index in zip(range(3 , 0 , -1) , range(0 , 3)):
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight'''))
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight'''))
rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias'''))
rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight'''))
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight'''))
rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias'''))
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight'))
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias'))
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers):
# self-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias'''))
# cross-attention out projection
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias'''))
# MLP 1
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias'''))
# MLP 2
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias'''))
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias'''))
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias'''))
# layernorm 3 (final layernorm)
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias'''))
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight'))
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias'))
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight'))
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight'))
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias'))
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight'))
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias'))
for i in range(3):
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight'''))
rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias'''))
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
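# rename_key on a toy dict, for intuition: it pops the old entry and re-inserts
# the same value under the new name.
#   d = {"old": 1}; rename_key(d, "old", "new"); assert d == {"new": 1}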
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
    assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert',
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 449 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 94 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with a CLS head on top for predicting the answer category.
    This way we can load its weights with FlaxBigBirdForQuestionAnswering.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        num_classes = logits.shape[-1]
        # one-hot encode the labels, then take the negative log-likelihood
        labels = (labels[..., None] == jnp.arange(num_classes)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
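# Worked example for the inner cross_entropy (pure jnp, no model needed): for
# logits = jnp.array([[2.0, 0.0, 0.0]]) and labels = jnp.array([0]), the loss is
# -log_softmax(logits)[0, 0] = -ln(e^2 / (e^2 + 2)) ~= 0.2395.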
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        # pad every sequence to max_length and build the matching attention mask
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
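# Batching sketch: the trailing partial batch is dropped, so a dict-style dataset of
# 5 rows with batch_size=2 yields exactly 2 batches (assumes a `datasets.Dataset`,
# whose integer slice returns a dict of columns):
#   ds = datasets.Dataset.from_dict({"input_ids": [[1], [2], [3], [4], [5]]})
#   assert len(list(get_batched_dataset(ds, batch_size=2))) == 2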
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # keys of the flattened dict are tuples of layer names; decay everything
        # except biases and LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
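
# Hedged usage sketch of the two helpers above; the hyperparameter values are
# illustrative assumptions, not values taken from this file.
# tx, lr = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=1_000, num_train_steps=10_000, weight_decay=0.01)
# float(lr(0)) == 0.0 (warmup starts at init_lr); float(lr(1_000)) == 3e-5 (peak at end of warmup).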
| 59 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from provided command line arguments.
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers CLI.
        """
        train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
        train_parser.add_argument(
            "--train_data",
            type=str,
            required=True,
            help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.",
        )
        train_parser.add_argument(
            "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
        )
        train_parser.add_argument(
            "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
        )
        train_parser.add_argument(
            "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
        )
        train_parser.add_argument(
            "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
        )
        train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
        train_parser.add_argument(
            "--validation_split",
            type=float,
            default=0.1,
            help="if validation dataset is not provided, fraction of train dataset to use as validation dataset.",
        )
        train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
        train_parser.add_argument(
            "--task", type=str, default="text_classification", help="Task to train the model on."
        )
        train_parser.add_argument(
            "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
        )
        train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
        train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
        train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
        train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")

        self.framework = "tf" if is_tf_available() else "torch"

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
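
# Hedged sketch of how this command is typically wired into a root parser; the
# example argv values are placeholders, not from this file.
# parser = ArgumentParser("Transformers CLI tool")
# commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
# TrainCommand.register_subcommand(commands_parser)
# args = parser.parse_args(["train", "--train_data", "train.csv"])
# args.func(args).run()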
| 417 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
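
# Design note (hedged): with the structure above, importing this module stays
# cheap; e.g. `from transformers import ConvNextModel` only pays the heavy
# torch import cost on first attribute access, via _LazyModule.__getattr__.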
| 417 | 1 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length (list[min, max])
            max_size (int): maximum allowed longest edge length.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length
    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)

            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)

        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std
    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
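
# Hedged usage sketch (assumes a detectron2-style `cfg` with the fields read in
# Preprocess.__init__; the file name and box variables are placeholders):
# preprocess = Preprocess(cfg)
# images, sizes, scales_yx = preprocess("photo.jpg", single_image=True)
# boxes = _scale_box(raw_boxes, scales_yx[None])  # map boxes back to raw-image scale
# _clip_box(boxes, (raw_height, raw_width))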
| 257 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 257 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Returns the count of integers base**power with 1 <= base < max_base and
    1 <= power < max_power whose decimal representation has exactly `power` digits.
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(1 for power in powers for base in bases if len(str(base**power)) == power)


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
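
# Worked example: 8**10 = 1073741824 has exactly 10 digits, so it is counted,
# while 10**n always has n + 1 digits and never counts. With the default
# bounds this is Project Euler problem 63, whose known answer is 49.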
| 644 | 0 |
def gnome_sort(lst: list) -> list:
    """Pure implementation of the gnome sort algorithm in Python."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
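
# Sanity check (gnome sort is O(n^2) worst case, in-place, and stable since
# equal neighbours are never swapped):
# gnome_sort([3, 1, 2]) == [1, 2, 3]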
| 466 |
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """
    Returns the smallest candidate for which the proportion of perfect
    partitions first drops below max_proportion.
    """
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"{solution() = }")
| 257 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """
    Construct a Speech2Text tokenizer (SentencePiece based).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(self, vocab_file, spm_file, bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", do_upper_case=False, do_lower_case=False, tgt_lang=None, lang_codes=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, do_upper_case=do_upper_case, do_lower_case=do_lower_case, tgt_lang=tgt_lang, lang_codes=lang_codes, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"<lang:{lang}>" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"<lang:{lang}>") for lang in self.langs}

            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f"{save_directory} should be a directory"
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
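
# Hedged usage sketch (the two file paths are placeholders; a real checkpoint
# such as "facebook/s2t-small-librispeech-asr" ships both files):
# tokenizer = Speech2TextTokenizer(vocab_file="vocab.json", spm_file="sentencepiece.bpe.model")
# ids = tokenizer("hello world").input_ids  # optional lang prefix + pieces + eos_token_id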
| 701 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
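
# Hedged sketch of a concrete subclass (EchoCommand is illustrative, not part
# of the library; `parser` is the subparsers object the CLI passes in):
# class EchoCommand(BaseTransformersCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         echo_parser = parser.add_parser("echo")
#         echo_parser.add_argument("text")
#         echo_parser.set_defaults(func=lambda args: EchoCommand(args.text))
#
#     def __init__(self, text):
#         self.text = text
#
#     def run(self):
#         print(self.text)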
| 286 | 0 |
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
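
# Hedged usage sketch (assumes a d4rl-style `env` and pretrained UNet1DModel
# checkpoints; all names here are placeholders):
# pipeline = ValueGuidedRLPipeline(value_function, unet, DDPMScheduler(), env)
# obs = env.reset()
# action = pipeline(obs, planning_horizon=32, n_guide_steps=2, scale=0.1)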
| 227 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # the fused qkv matrix is split into separate query/key/value tensors;
            # the HF key names below follow the usual swin conversion pattern
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
UpperCamelCase__ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 227 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
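
# Hedged example: a deliberately small config for quick tests (sizes are
# illustrative, not the model defaults above):
# tiny = RoCBertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2)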
| 710 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(self, prediction_length: Optional[int] = None, context_length: Optional[int] = None, distribution_output: str = "student_t", loss: str = "nll", input_size: int = 1, lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], scaling: bool = True, num_time_features: int = 0, num_dynamic_real_features: int = 0, num_static_categorical_features: int = 0, num_static_real_features: int = 0, cardinality: Optional[List[int]] = None, embedding_dimension: Optional[List[int]] = None, d_model: int = 64, encoder_attention_heads: int = 2, decoder_attention_heads: int = 2, encoder_layers: int = 2, decoder_layers: int = 2, encoder_ffn_dim: int = 32, decoder_ffn_dim: int = 32, activation_function: str = "gelu", dropout: float = 0.1, encoder_layerdrop: float = 0.1, decoder_layerdrop: float = 0.1, attention_dropout: float = 0.1, activation_dropout: float = 0.1, num_parallel_samples: int = 100, init_std: float = 0.02, use_cache: bool = True, is_encoder_decoder=True, label_length: int = 10, moving_average: int = 25, autocorrelation_factor: int = 3, **kwargs):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 565 | 0 |
def is_sum_subset(arr: list, required_sum: int) -> bool:
    """
    Determine whether a subset of `arr` sums exactly to `required_sum`,
    using bottom-up dynamic programming.
    """
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
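
# Reading the DP table: subset[i][j] is True iff some subset of the first i
# elements sums to j. For example:
# is_sum_subset([3, 34, 4, 12, 5, 2], 9) -> True (4 + 5), while 30 -> False.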
| 193 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """
    May raise json.decoder.JSONDecodeError
    """
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """
    Class to crawl Instagram user information.
    """

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """
        Return a dict of user information.
        """
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
def __repr__( self ):
"""simple docstring"""
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self ):
"""simple docstring"""
return f'{self.fullname} ({self.username}) is {self.biography}'
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def UpperCAmelCase_ ( snake_case__ = "github" ) -> None:
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
lowerCAmelCase__ = InstagramUser(snake_case__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , snake_case__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : Optional[int] = InstagramUser("github")
print(instagram_user)
print(f"""{instagram_user.number_of_posts = }""")
print(f"""{instagram_user.number_of_followers = }""")
print(f"""{instagram_user.number_of_followings = }""")
print(f"""{instagram_user.email = }""")
print(f"""{instagram_user.website = }""")
print(f"""{instagram_user.profile_picture_url = }""")
print(f"""{instagram_user.is_verified = }""")
print(f"""{instagram_user.is_private = }""")
| 193 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]

        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 220 |
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
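

# Illustrative sketch (not part of the original script): for a single linear
# layer the merge above computes W <- W + alpha * (up @ down), where "up" and
# "down" are the paired LoRA matrices. The shapes below are assumptions chosen
# only to make the example self-contained.
def _lora_merge_example(alpha=0.75, rank=4, d_in=16, d_out=16):
    weight = torch.randn(d_out, d_in)    # frozen base weight W0
    lora_down = torch.randn(rank, d_in)  # "lora_down" factor
    lora_up = torch.randn(d_out, rank)   # "lora_up" factor
    merged = weight + alpha * torch.mm(lora_up, lora_down)
    assert merged.shape == weight.shape
    return merged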
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.'
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors'
)
parser.add_argument(
'--lora_prefix_text_encoder',
default='lora_te',
type=str,
help='The prefix of text encoder weight in safetensors',
)
parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW')
parser.add_argument(
'--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.'
)
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 220 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 471 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
'Pregnancy': 168_629,
'Christianity': 7_675,
'Explain': 106_423,
'Fitness': 63_440,
'Saving': 63_163,
'Ask': 27_171,
'Ass': 95_985,
'Joke': 163_509,
'Questions': 45_622,
'Thoughts': 49_605,
'Retail': 52_342,
'Feminism': 164_338,
'Writing': 11_992,
'Atheism': 192_263,
'Netflix': 48_616,
'Computing': 39_639,
'Opinion': 43_213,
'Alone': 44_967,
'Funny': 58_917,
'Gaming': 40_358,
'Human': 4_088,
'India': 1_331,
'Joker': 77_138,
'Diet': 36_206,
'Legal': 11_859,
'Norman': 4_939,
'Tip': 72_689,
'Weight': 52_343,
'Movies': 46_273,
'Running': 23_425,
'Science': 2_090,
'Horror': 37_793,
'Confession': 60_572,
'Finance': 12_250,
'Politics': 16_360,
'Scary': 191_985,
'Support': 12_654,
'Technologies': 32_516,
'Teenage': 66_160,
'Event': 32_769,
'Learned': 67_460,
'Notion': 182_770,
'Wikipedia': 37_583,
'Books': 6_665,
'Extract': 76_050,
'Confessions': 102_701,
'Conspiracy': 75_932,
'Links': 63_674,
'Narcissus': 150_425,
'Relationship': 54_766,
'Relationships': 134_796,
'Reviews': 41_671,
'News': 4_256,
'Translation': 26_820,
'multilingual': 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
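

# Example of ``get_pairs`` on a symbol tuple (illustrative):
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}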
class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
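
    # Illustrative walk-through of ``bpe`` (the best-ranked pair depends on the
    # loaded merges file, so the exact merges below are an assumption):
    # bpe("hello") starts from ("h", "e", "l", "l", "o</w>"), repeatedly merges
    # the adjacent pair with the lowest merge rank, then joins the remaining
    # symbols with "@@ " and strips the trailing "</w>".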
    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 471 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
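

# Example behaviour of the helpers above (illustrative):
#   split_text("a b c d e", n=2)  -> ["a b", "c d", "e"]
#   split_documents({"title": ["t"], "text": ["a b c"]})
#   -> {"title": ["t"], "text": ["a b c"]}  (one passage, since n defaults to 100)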
def UpperCAmelCase_ ( __lowercase : "RagExampleArguments" , __lowercase : "ProcessingArguments" , __lowercase : "IndexHnswArguments" , ) -> Optional[Any]:
'''simple docstring'''
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
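
        # To reload and query the saved artifacts later (illustrative sketch;
        # the paths mirror the ones built inside main() above):
        #   from datasets import load_from_disk
        #   dataset = load_from_disk(passages_path)
        #   dataset.load_faiss_index("embeddings", index_path)
        #   scores, examples = dataset.get_nearest_examples("embeddings", question_embedding, k=5)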
| 119 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
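

# Usage sketch (illustrative): this packaged module is what backs
# ``load_dataset("csv", ...)``; extra keyword arguments map onto CsvConfig fields.
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")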
| 119 | 1 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)
class VisionEncoderDecoderConfig(PretrainedConfig):
    model_type = "vision-encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                f"A configuration of type {self.model_type} cannot be instantiated because "
                f"not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}"
            )

        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs) -> PretrainedConfig:
        logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}})


class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict()
        common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"}
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        import torch

        common_inputs = OrderedDict()

        dummy_input = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        batch, encoder_sequence = dummy_input["input_ids"].shape
        encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size)
        common_inputs["input_ids"] = dummy_input.pop("input_ids")
        common_inputs["attention_mask"] = dummy_input.pop("attention_mask")
        common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape)

        return common_inputs


class VisionEncoderDecoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> None:
        pass

    def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig:
        return VisionEncoderDecoderEncoderOnnxConfig(encoder_config)

    def get_decoder_config(self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default") -> OnnxConfig:
        decoder_config.encoder_hidden_size = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature)
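

# Usage sketch (illustrative): composing a joint config from two existing
# sub-configs with the classmethod defined above. The concrete sub-config
# classes are assumptions chosen only for the example.
#   from transformers import BertConfig, ViTConfig
#   config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(ViTConfig(), BertConfig())
#   assert config.is_encoder_decoder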
| 27 |
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
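
# Note (illustrative): the per-pixel Python loop in convert_to_negative can be
# replaced by a single vectorized NumPy operation, which is much faster:
#   img = 255 - img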
| 302 | 0 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
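

# Usage sketch (illustrative; the argument values are assumptions):
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="squad_data")
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = train_dataset[0]  # dict of tensors ready for a question-answering model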
| 709 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
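

# Usage sketch (illustrative): batching JsonlDataset examples with the
# collate_fn defined above.
#   from torch.utils.data import DataLoader
#   loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
#   text, mask, image, img_start, img_end, labels = next(iter(loader))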
| 329 | 0 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
| 454 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 454 | 1 |
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 168 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 168 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 696 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 465 | 0 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` appears in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents).

    Each line of `corpus` is treated as one document.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), or the smoothed variant 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine a term frequency and an inverse document frequency into a tf-idf score."""
    return round(tf * idf, 3)
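

# --- Minimal usage sketch (illustrative; not part of the original module) ---
# Each line of the corpus is treated as one document, as `document_frequency`
# expects; the numeric comments follow the round-to-3 behaviour of the functions above.
if __name__ == "__main__":
    example_corpus = "this is the first document\nthis is the second document\nand the third one"
    tf = term_frequency("first", "this is the first document")  # 1
    df, n = document_frequency("first", example_corpus)  # (1, 3)
    idf = inverse_document_frequency(df, n)  # log10(3 / 1) = 0.477
    print(tf_idf(tf, idf))  # 0.477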
| 707 |
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers, i.e. numbers of the form 2^i * 3^j * 5^k."""
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list
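
# For reference (comment added for clarity; not in the original file):
# hamming(10) == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]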
if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
    hamming_numbers = hamming(int(n))
print('''-----------------------------------------------------''')
print(F"The list with nth numbers is: {hamming_numbers}")
print('''-----------------------------------------------------''')
| 456 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 323 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Any = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
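
# Note on the lazy-import pattern above (explanatory comment, not in the original
# file): at import time only the `_import_structure` dict is built; `_LazyModule`
# defers the heavy torch/tf/flax submodule imports until an attribute is first
# accessed, e.g.
#
#   from transformers.models.vision_encoder_decoder import VisionEncoderDecoderModel
#
# only then triggers the actual `modeling_vision_encoder_decoder` import.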
| 323 | 1 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")

    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 152 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al., tailored to variance-expanding (VE) models."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
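
# --- Minimal sampling-loop sketch (illustrative; `model` stands for a hypothetical
# denoiser and is NOT part of this file) ---
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(num_inference_steps=50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for i, t in enumerate(scheduler.timesteps):
#       sigma = scheduler.schedule[i]
#       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0.0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = model(sample_hat, sigma_hat)  # hypothetical call
#       step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:  # second-order correction, as in step_correct above
#           model_output = model(step_output.prev_sample, sigma_prev)  # hypothetical call
#           step_output = scheduler.step_correct(
#               model_output, sigma_hat, sigma_prev, sample_hat,
#               step_output.prev_sample, step_output.derivative,
#           )
#       sample = step_output.prev_sample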
| 152 | 1 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states
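
# --- Minimal shape-check sketch for the blocks above (illustrative; not part of
# the original module; the sizes below are hypothetical) ---
#
#   import jax
#
#   block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=2)
#   sample = jnp.zeros((1, 16, 16, 32))  # Flax diffusers blocks use NHWC layout
#   temb = jnp.zeros((1, 128))           # per-sample time embedding
#   params = block.init(jax.random.PRNGKey(0), sample, temb, True)
#   hidden_states, output_states = block.apply(params, sample, temb, True)
#   # hidden_states has half the spatial size after downsampling; output_states
#   # collects the per-layer skip activations consumed by the up blocks.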
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states

| 30 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
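
# --- Minimal usage sketch (illustrative; not part of the original file) ---
#
#   from transformers import MraConfig, MraModel
#
#   config = MraConfig()       # uw-madison/mra-base-512-4 style defaults
#   model = MraModel(config)   # randomly initialized model with that architecture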
| 323 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens,
            ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"],
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_python_en_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_python_en_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_python_en_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
| 554 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert the tensors in a saved state dict to float16 and save the result."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
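
# Example invocation via python-fire (illustrative comment; the file names are
# hypothetical):
#
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# fire.Fire(convert) maps positional and --flag CLI arguments onto `convert`'s
# parameters, so omitting --save_path overwrites the source file in place.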
| 554 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def UpperCamelCase_ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
a__, a__, a__ : str = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)
return results
if __name__ == "__main__":
main()
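# Example invocation (a sketch only; the CSV paths and the label column index
# below are hypothetical, not shipped with this script):
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 --output_dir /tmp/text-clf --do_train --do_eval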
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features: Features) -> "TextClassification":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
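# Minimal usage sketch (the feature names here are illustrative, not fixed by
# the template): aligning against a concrete schema specializes `labels` to the
# dataset's own ClassLabel.
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   template = TextClassification(text_column="text", label_column="labels")
#   template = template.align_with_features(features)
#   assert template.label_schema["labels"].names == ["neg", "pos"]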
from __future__ import annotations
seive = [True] * 100_0001
i = 2
while i * i <= 100_0000:
    if seive[i]:
        for j in range(i * i, 100_0001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(F"{len(find_circular_primes()) = }")
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
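# Usage sketch, mirroring the docstring example above (assumes the `datasets`
# metric-loading API that this file targets):
#
#   metric = datasets.load_metric("competition_math")
#   results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#   print(results)  # {'accuracy': 1.0}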
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
A : List[str] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        # `A` above holds the expected encoding for this slow integration check.
        self.tokenizer_integration_test_util(
            expected_encoding=A,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1],
        )
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias'] = in_proj_bias[-dim:]
            # fmt: on


def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f'Model name {model_name} not supported')

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
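    # Example invocation (the script filename below is an assumption, not fixed
    # by this file; both checkpoints are fetched from the Hub):
    #
    #   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
    #       --pytorch_dump_folder_path /tmp/deta-swin-large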
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
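# For reference, the streaming pattern these tests exercise looks like this in
# application code (a minimal sketch; `model`, `tokenizer` and `input_ids` are
# whatever you already have loaded):
#
#   streamer = TextIteratorStreamer(tokenizer)
#   thread = Thread(target=model.generate, kwargs={"input_ids": input_ids, "streamer": streamer})
#   thread.start()
#   for new_text in streamer:
#       print(new_text, end="")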
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Must be tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
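# Note: the Trainer dispatches lifecycle events to callbacks by method name
# (`on_train_begin`, `on_step_end`, ...), so recording the names above captures
# the exact event sequence of a run for the assertions below.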
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Pure-Python SHA-256, following the FIPS 180-4 specification."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
0x6A09_E667,
0xBB67_AE85,
0x3C6E_F372,
0xA54F_F53A,
0x510E_527F,
0x9B05_688C,
0x1F83_D9AB,
0x5BE0_CD19,
]
        # Initialize round constants
        self.round_constants = [
0x428A_2F98,
0x7137_4491,
0xB5C0_FBCF,
0xE9B5_DBA5,
0x3956_C25B,
0x59F1_11F1,
0x923F_82A4,
0xAB1C_5ED5,
0xD807_AA98,
0x1283_5B01,
0x2431_85BE,
0x550C_7DC3,
0x72BE_5D74,
0x80DE_B1FE,
0x9BDC_06A7,
0xC19B_F174,
0xE49B_69C1,
0xEFBE_4786,
0x0FC1_9DC6,
0x240C_A1CC,
0x2DE9_2C6F,
0x4A74_84AA,
0x5CB0_A9DC,
0x76F9_88DA,
0x983E_5152,
0xA831_C66D,
0xB003_27C8,
0xBF59_7FC7,
0xC6E0_0BF3,
0xD5A7_9147,
0x06CA_6351,
0x1429_2967,
0x27B7_0A85,
0x2E1B_2138,
0x4D2C_6DFC,
0x5338_0D13,
0x650A_7354,
0x766A_0ABB,
0x81C2_C92E,
0x9272_2C85,
0xA2BF_E8A1,
0xA81A_664B,
0xC24B_8B70,
0xC76C_51A3,
0xD192_E819,
0xD699_0624,
0xF40E_3585,
0x106A_A070,
0x19A4_C116,
0x1E37_6C08,
0x2748_774C,
0x34B0_BCB5,
0x391C_0CB3,
0x4ED8_AA4A,
0x5B9C_CA4F,
0x682E_6FF3,
0x748F_82EE,
0x78A5_636F,
0x84C8_7814,
0x8CC7_0208,
0x90BE_FFFA,
0xA450_6CEB,
0xBEF9_A3F7,
0xC671_78F2,
]
        self.preprocessed_data = self.preprocessing(self.data)
self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with a single 1 bit, zeros, then the 64-bit big-endian message length.
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x1_0000_0000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFF_FFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x1_0000_0000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x1_0000_0000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x1_0000_0000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x1_0000_0000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_0000_0000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        # Right-rotate a 32-bit value.
        return 0xFFFF_FFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
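# Quick sanity check (doctest-style sketch): the pure-Python digest matches the
# well-known SHA-256 of the empty message.
#
#   >>> SHA256(b"").hash
#   'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'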
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
"""simple docstring"""
__snake_case : Optional[Any] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
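# Each entry test_graph[u][v] is the capacity of the directed edge u -> v
# (0 means no edge); node 0 is used as the source and node 5 as the sink below.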
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path.
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
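# The printed pairs are the edges whose original capacity was fully consumed by
# the max-flow computation, i.e. the saturated edges that form the minimum cut.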
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Converts the '#'-delimited table text (rows separated by newlines) into a pandas DataFrame."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
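
# A minimal sketch (illustrative, not part of the original training flow) of the
# `table_text` format that `preprocess_tabfact_function` above expects:
# '#'-separated cells and newline-separated rows, with the first row as the header.
# The sample values below are made up.
def _demo_table_text_to_pandas():
    table_text = "name#age\nalice#30\nbob#25\n"
    rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(rows[1:], columns=rows[0])  # columns: ["name", "age"]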
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
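
# A quick sketch (illustrative) of instantiating the config above with its defaults
# while overriding one field; `max_token_length` caps the number of decoded characters.
def _demo_mgpstr_config():
    config = MgpstrConfig(max_token_length=32)
    return config.hidden_size, config.max_token_length  # expected: (768, 32)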
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Finds a root of `function` with the secant method, seeded at x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant update: follow the line through (x_n, f(x_n)) and (x_n1, f(x_n1)).
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
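
# A small extra usage sketch (not in the original): the same routine finds
# sqrt(2) as the positive root of g(x) = x**2 - 2 when seeded near the root.
def _demo_sqrt_two() -> float:
    return intersection(lambda x: x**2 - 2, 1.0, 2.0)  # ~1.41421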
def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))


def encrypt(input_string: str, key: int) -> str:
    """Shuffles the character order of `input_string` over a zigzag rail-fence grid of `key` rows."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Recovers the plaintext by rebuilding the zigzag grid and reading it back in zigzag order."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Uses the decrypt function by guessing every key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
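
# A round-trip usage sketch (illustrative values): encrypting and decrypting with
# the same key returns the original string, and bruteforce recovers it as well.
def _demo_rail_fence() -> bool:
    message = "HELLO WORLD"
    key = 4
    ciphertext = encrypt(message, key)
    return decrypt(ciphertext, key) == message and bruteforce(ciphertext)[key] == message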
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Computes the expected height and width after resizing, for one image or a batch."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
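
# A minimal sketch (illustrative values) of the COCO-style detection payload the
# tests above feed to the image processor: one dict per image with its numeric id
# and a list of annotation dicts (bbox in [x, y, width, height] pixel coordinates).
def _demo_coco_target() -> dict:
    return {
        "image_id": 1,
        "annotations": [
            {"bbox": [10.0, 20.0, 30.0, 40.0], "category_id": 17, "area": 1200.0, "iscrowd": 0}
        ],
    }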
"""simple docstring"""
import math
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
return math.sqrt(_lowercase ) * math.sqrt(_lowercase ) == num
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
UpperCAmelCase : List[Any] = 0
UpperCAmelCase : Tuple = n
while left <= right:
UpperCAmelCase : Tuple = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
UpperCAmelCase : List[str] = mid - 1
else:
UpperCAmelCase : Any = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 0 |
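
# A small comparison sketch (illustrative): both predicates agree on small inputs,
# while the binary-search version stays exact even for very large integers where
# the float round-trip through math.sqrt can misclassify.
def _demo_perfect_square() -> bool:
    return perfect_square(16) and perfect_square_binary_search(16) and not perfect_square_binary_search(15)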
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
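
# A minimal forward-pass sketch (illustrative, not from the original file); the
# `extra_in_channels=16` setting is an assumption that makes the default Fourier
# time embedding line up with the first down block's input channels.
def _demo_unet_1d():
    model = UNet1DModel(sample_size=16384, extra_in_channels=16)
    noise = torch.randn(1, 2, 16384)
    return model(noise, timestep=10).sample.shape  # expected: torch.Size([1, 2, 16384])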
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Optional[int] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
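
# A short sketch (illustrative) of what the _LazyModule pattern above buys: the
# package import itself is cheap, and heavy submodules are only loaded when one
# of their attributes is first accessed.
def _demo_lazy_import():
    import importlib

    module = importlib.import_module("transformers.models.speecht5")
    return module.SpeechT5Config  # triggers the real configuration_speecht5 import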
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwargs key ("memorry") is kept as-is: it is the key this
        # config historically accepted from callers
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
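
# A tiny sketch (illustrative) of the validation above: a well-formed rope_scaling
# dict passes, while an unsupported "type" raises the ValueError from
# _rope_scaling_validation.
def _demo_rope_scaling():
    ok = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    try:
        OpenLlamaConfig(rope_scaling={"type": "exponential", "factor": 2.0})
    except ValueError:
        return ok.rope_scaling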
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

logger = logging.get_logger(__name__)
class MaskFormerConfig(PretrainedConfig):
    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits

        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Instantiates a MaskFormerConfig from a backbone model configuration and a DETR decoder configuration."""
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
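
# A composition sketch (illustrative) of the classmethod above: pairing a default
# Swin backbone with a DETR decoder, then round-tripping through to_dict().
def _demo_maskformer_config():
    config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
    return config.to_dict()["backbone_config"]["model_type"]  # expected: "swin"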
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ..utils import DummyObject, requires_backends


# Thirteen dummy placeholder classes follow; the original class names were lost in
# this dump, so the source's placeholder name is kept. Each raises an ImportError
# via requires_backends when flax is not installed.
class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class UpperCamelCase__(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
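
# A behavior sketch (illustrative) of the dummy-object mechanism above: without
# flax installed, instantiating any dummy raises an ImportError from
# requires_backends whose message names the missing backend.
def _demo_dummy_guard() -> bool:
    try:
        UpperCamelCase__()  # any of the dummies above
    except ImportError as err:
        return "flax" in str(err)
    return False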
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")

# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite=False):
    """Sorts the entries of each auto mapping in `fname` by identifier; optionally rewrites the file."""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def __lowerCamelCase ( SCREAMING_SNAKE_CASE = False ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = [os.path.join(SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) for f in os.listdir(SCREAMING_SNAKE_CASE ) if f.endswith('.py' )]
_UpperCAmelCase = [sort_auto_mapping(SCREAMING_SNAKE_CASE,overwrite=SCREAMING_SNAKE_CASE ) for fname in fnames]
if not overwrite and any(SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = [f for f, d in zip(SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(SCREAMING_SNAKE_CASE )}. Run `make style` to fix"""
' this.' )
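# Illustration of the sort key above (hypothetical mapping line, not from the
# original file): the identifier captured by _re_identifier is the quoted model
# type at the start of each block, so blocks sort by model name only.
#
#   _re_identifier.search('        ("bert", "BertModel"),').groups()[0]   # -> 'bert'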
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
| 494 | 0 |
"""Tests for AlignProcessor, pairing a BERT tokenizer with an EfficientNet image processor."""

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AlignProcessor, EfficientNetImageProcessor


@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 442 |
"""
Longest Common Subsequence: given two sequences, find the length of the longest
subsequence present in both. A subsequence keeps the relative order of elements
but need not be contiguous.
"""


def longest_common_subsequence(x: str, y: str):
    """
    Finds the longest common subsequence between two strings and returns both
    its length and the subsequence itself.

    >>> longest_common_subsequence("programming", "gaming")
    (6, 'gaming')
    """
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    # backtrack through the table to reconstruct one optimal subsequence
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
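# Worked example (illustrative, not part of the original module): for
# x = "AGGTAB" and y = "GXTXAYB" the DP table ends with l[6][7] == 4, and the
# backtracking loop rebuilds the subsequence right-to-left:
#
#   longest_common_subsequence("AGGTAB", "GXTXAYB")   # -> (4, 'GTAB')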
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 603 | 0 |
"""Evaluates a speech-recognition checkpoint on a dataset and reports WER/CER."""

import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Computes and logs the result metrics (WER and CER)."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lower-cases the target text and strips punctuation/whitespace artifacts."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
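# Quick check of normalize_text (illustrative; the ignore-regex must mirror
# whatever was stripped during training, so treat the exact output as an
# assumption about that character set):
#
#   normalize_text("Hello, World!\n\nIt's me.")   # -> "hello world it's me"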
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
| 710 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Creates a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
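# Sanity sketch (illustrative, not part of the scheduler module): each beta_i
# is the per-step noise rate implied by the cumulative alpha_bar curve,
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at max_beta, so the
# running product of (1 - beta) re-traces a discretized alpha_bar:
#
#   betas = betas_for_alpha_bar(1000)                     # cosine schedule
#   alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)    # ~ alpha_bar(t)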
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler implementing Algorithm 2 (Heun's second-order method) from Karras et al. (2022)."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """Scales the denoising model input by `1 / sqrt(sigma**2 + 1)`."""
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # duplicate the interior sigmas so each Heun step gets a predictor and a corrector stage
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
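def _example_denoising_loop(unet, device="cuda"):
    """Minimal usage sketch, not part of the original module. Assumes `unet` is
    any epsilon-predicting model with a diffusers-style ``unet(x, t).sample``
    API; the shapes and step count are illustrative. Because set_timesteps
    duplicates interior timesteps, the plain loop below naturally alternates
    between the first-order (predictor) and second-order (corrector) stages."""
    scheduler = HeunDiscreteScheduler(beta_schedule="scaled_linear")
    scheduler.set_timesteps(25, device=device)
    sample = torch.randn(1, 3, 64, 64, device=device) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = unet(model_input, t).sample
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample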
| 569 | 0 |
"""Fine-tuning OpenAI GPT on the RocStories dataset (cloze-style multiple choice)."""

import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the first line
        for line in tqdm(reader):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    Produces Transformer inputs of shape (n_batch, 2, input_len) where, for each batch and continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
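# Layout illustration for pre_process_datasets (token ids are made up): with
# story = [10, 11], continuation = [12] and (start, delimiter, clf) = (0, 1, 2),
# each alternative row becomes
#     [0, 10, 11, 1, 12, 2, 0, 0, ...]   # zero-padded up to input_len
# and mc_token_ids stores the index of the trailing clf token (here 5), which
# is where the multiple-choice head reads its classification hidden state.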
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading function also adds new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
| 4 |
"""
A simple launcher script for TPU training, in the spirit of torch.distributed.launch.

Usage (all arguments after the script path are forwarded to it):
    python xla_spawn.py --num_cores=8 your_training_script.py --arg1 --arg2
"""

import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
main() | 337 | 0 |
"""Looks up a book on Open Library by its olid/ISBN and prints a summary."""

from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Given an 'isbn/0140328726' style olid, return the book data from Open Library as a dict."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a more human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
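# Shape sketch for summarize_book (field names come from the mapping above; the
# "Authors" entry triggers one get_openlibrary_data call per author, so this is
# not an offline function):
#
#   summary = summarize_book(get_openlibrary_data("isbn/0140328726"))
#   sorted(summary)   # ['Authors', 'First sentence', 'ISBN (10)', 'ISBN (13)',
#                     #  'Number of pages:', 'Publish date', 'Title']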
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException:
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| 700 | """Reimplementation of str.join with explicit per-element type checking."""
def join(separator: str, separated: list[str]) -> str:
    """
    >>> join("", ["a", "b", "c", "d"])
    'abcd'
    >>> join(" ", ["You", "are", "amazing!"])
    'You are amazing!'
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
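# Example usage (illustrative): the loop appends a trailing separator which the
# final strip removes. Because strip treats the separator as a character set,
# this matches str.join only when no element begins or ends with separator
# characters:
#
#   join("-", ["2024", "01", "01"])   # -> '2024-01-01'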
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 43 | 0 |
"""Burrows–Wheeler transform and its inverse."""

from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Returns all the string rotations of `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Returns the Burrows–Wheeler string of `s` plus the index of `s` among its sorted rotations."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverses the Burrows–Wheeler transform given the BWT string and the original-row index."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    # rebuild the sorted rotation table one column at a time
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
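# Round-trip example (values computed by hand from the rotation sort above):
# the six rotations of "banana" sort to [abanan, anaban, ananab, banana,
# nabana, nanaba], whose last column reads "nnbaaa" with the original string at
# index 3, and reverse_bwt rebuilds the input:
#
#   result = bwt_transform("banana")   # {'bwt_string': 'nnbaaa', 'idx_original_string': 3}
#   reverse_bwt(result["bwt_string"], result["idx_original_string"])   # -> 'banana'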
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
) | 448 |
"""Image processor class for GLPN."""

from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resizes the image so that both height and width are multiples of size_divisor."""
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors) | 448 | 1 |
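# Usage sketch for the processor above (assumed input shape, for illustration):
# with size_divisor=32 a 518x638 image is resized down to 512x608, since
# 518 // 32 * 32 == 512 and 638 // 32 * 32 == 608, then rescaled by 1/255 and
# stacked channels-first.
#
#   image_processor = GLPNImageProcessor(size_divisor=32)
#   batch = image_processor.preprocess(images, return_tensors="np")   # batch["pixel_values"]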
from __future__ import annotations

import unittest

from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.distilbert.modeling_tf_distilbert import (
        TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFDistilBertForMaskedLM,
        TFDistilBertForMultipleChoice,
        TFDistilBertForQuestionAnswering,
        TFDistilBertForSequenceClassification,
        TFDistilBertForTokenClassification,
        TFDistilBertModel,
    )


class TFDistilBertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 718 |
"""Basic binary tree helpers: in-order display, depth, and full-tree check."""

from __future__ import annotations


class Node:
    """A Node has a data variable and pointers to its left and right child nodes."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    """Recursive function that returns the depth of a binary tree."""
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    """Returns True iff every node of the tree has either 0 or 2 children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
def _lowerCamelCase( ) -> None: # Main function for testing.
A : Optional[int] = Node(1 )
A : Tuple = Node(2 )
A : Dict = Node(3 )
A : List[str] = Node(4 )
A : Union[str, Any] = Node(5 )
A : str = Node(6 )
A : Any = Node(7 )
A : str = Node(8 )
A : Optional[int] = Node(9 )
print(is_full_binary_tree(UpperCamelCase__ ) )
print(depth_of_tree(UpperCamelCase__ ) )
print('''Tree is: ''' )
display(UpperCamelCase__ )
if __name__ == "__main__":
main()
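
# Illustrative sketch (editor's example, not in the original module): the
# helpers above on a tiny hand-built tree.
def _example_small_tree() -> None:
    root = Node(1 )
    root.left, root.right = Node(2 ), Node(3 )
    assert is_full_binary_tree(root )  # every node has zero or two children
    assert depth_of_tree(root ) == 2  # the root plus one level of children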
| 537 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}
def load_vocab(vocab_file ):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("\n" )
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(vocab_file ) )
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
        for i in range(10 ):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k )
    def __getstate__(self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self , d ):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece" )
            raise

        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def get_special_tokens_mask(
        self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences(
        self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    @property
    def vocab_size(self ):
        return len(self.sp_model ) + self.fairseq_offset

    def get_vocab(self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _tokenize(self , text: str ):
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self , index ):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def convert_tokens_to_string(self , tokens ):
        """Converts a sequence of tokens (pieces) into a single string."""
        out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
        return out_string
    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
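
# Illustrative usage (editor's example, a hedged sketch): round-trips a
# sentence through the tokenizer above. It assumes a local SentencePiece model
# file is available (the real one is fetched from PRETRAINED_VOCAB_FILES_MAP),
# so this is a sketch rather than a test.
def _example_roundtrip(vocab_file: str = "prophetnet.tokenizer" ) -> None:
    tokenizer = XLMProphetNetTokenizer(vocab_file )
    tokens = tokenizer.tokenize("Hello world" )  # SentencePiece pieces such as ["▁Hello", "▁world"]
    ids = tokenizer.convert_tokens_to_ids(tokens )  # non-special ids are shifted by fairseq_offset (12)
    assert len(ids ) == len(tokens )
    assert tokenizer.convert_tokens_to_string(tokens ) == "Hello world"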
| 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str ) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>" , "" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 407 | 0 |
'''simple docstring'''
def solution(length: int = 50 ) -> int:
    """Returns the number of ways a row of the given length can be filled."""
    ways_number = [1] * (length + 1 )

    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
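
# Illustrative cross-check (editor's example): the DP above counts the ways to
# fill a row with unit cells plus tiles of length 2-4, i.e. compositions into
# parts {1, 2, 3, 4}; a direct recursion must agree for small lengths.
def _ways_bruteforce(length: int ) -> int:
    if length < 0:
        return 0
    if length == 0:
        return 1
    return sum(_ways_bruteforce(length - part ) for part in (1, 2, 3, 4) )


assert all(solution(n ) == _ways_bruteforce(n ) for n in range(10 ) )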
| 701 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = "\"text\": [\"foo\", \"foo\"]"
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self , **kwargs ):
        return [bytes(CONTENT , "utf-8" )]


def mock_request(*args , **kwargs ):
    return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def UpperCamelCase_ ( A__ : List[Any] , A__ : List[Any] , A__ : str ):
'''simple docstring'''
import requests
monkeypatch.setattr(A__ , """request""" , A__ )
lowerCAmelCase_ : Tuple = URL
if issubclass(A__ , A__ ):
lowerCAmelCase_ : Optional[Any] = url
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : Dict = [url]
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : Tuple = {"""train""": url}
lowerCAmelCase_ : List[Any] = """dummy"""
lowerCAmelCase_ : str = """downloads"""
lowerCAmelCase_ : Dict = tmp_path
lowerCAmelCase_ : Any = DownloadConfig(
cache_dir=os.path.join(A__ , A__ ) , use_etag=A__ , )
lowerCAmelCase_ : List[Any] = DownloadManager(dataset_name=A__ , download_config=A__ )
lowerCAmelCase_ : int = dl_manager.download(A__ )
lowerCAmelCase_ : Any = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(A__ , A__ ):
lowerCAmelCase_ : str = [downloaded_paths]
lowerCAmelCase_ : Any = [urls]
elif isinstance(A__ , A__ ):
assert "train" in downloaded_paths.keys()
lowerCAmelCase_ : Union[str, Any] = downloaded_paths.values()
lowerCAmelCase_ : Optional[Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(A__ , A__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowerCAmelCase_ : Tuple = Path(A__ )
lowerCAmelCase_ : List[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowerCAmelCase_ : Optional[Any] = downloaded_path.read_text()
assert content == CONTENT
lowerCAmelCase_ : Tuple = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowerCAmelCase_ : Tuple = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : List[Any] , A__ : List[str] ):
'''simple docstring'''
lowerCAmelCase_ : int = str(A__ )
if issubclass(A__ , A__ ):
lowerCAmelCase_ : int = filename
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : List[str] = [filename]
elif issubclass(A__ , A__ ):
lowerCAmelCase_ : Union[str, Any] = {"""train""": filename}
lowerCAmelCase_ : Optional[int] = """dummy"""
lowerCAmelCase_ : str = xz_file.parent
lowerCAmelCase_ : List[str] = """extracted"""
lowerCAmelCase_ : Union[str, Any] = DownloadConfig(
cache_dir=A__ , use_etag=A__ , )
lowerCAmelCase_ : str = DownloadManager(dataset_name=A__ , download_config=A__ )
lowerCAmelCase_ : Union[str, Any] = dl_manager.extract(A__ )
lowerCAmelCase_ : List[Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(A__ , A__ ):
lowerCAmelCase_ : List[str] = [extracted_paths]
lowerCAmelCase_ : Union[str, Any] = [paths]
elif isinstance(A__ , A__ ):
assert "train" in extracted_paths.keys()
lowerCAmelCase_ : Union[str, Any] = extracted_paths.values()
lowerCAmelCase_ : int = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(A__ , A__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowerCAmelCase_ : int = Path(A__ )
lowerCAmelCase_ : Optional[Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(A__ , etag=A__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowerCAmelCase_ : Any = extracted_path.read_text()
lowerCAmelCase_ : Optional[Any] = text_file.read_text()
assert extracted_file_content == expected_file_content
def _test_jsonl(path , file ):
    assert path.endswith(".jsonl" )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode("utf-8" ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def UpperCamelCase_ ( A__ : Optional[Any] , A__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = request.getfixturevalue(A__ )
lowerCAmelCase_ : List[Any] = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def UpperCamelCase_ ( A__ : str , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = request.getfixturevalue(A__ )
lowerCAmelCase_ : str = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(A__ ) , start=1 ):
_test_jsonl(A__ , A__ )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files ):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files ) , start=1 ):
        assert os.path.basename(file ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
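
# Illustrative sketch (editor's example): the cache file names asserted above
# come from hash_url_to_filename, which is deterministic in the URL and etag.
def _example_hash_is_deterministic() -> None:
    assert hash_url_to_filename(URL , etag=None ) == hash_url_to_filename(URL , etag=None )
    assert hash_url_to_filename(URL , etag=None ) != hash_url_to_filename(URL + "?v=2" , etag=None )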
| 398 | 0 |
'''simple docstring'''
from __future__ import annotations
A_ : str = "Muhammad Umer Farooq"
A_ : Optional[Any] = "MIT"
A_ : int = "1.0.0"
A_ : int = "Muhammad Umer Farooq"
A_ : int = "[email protected]"
A_ : Dict = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser ):
    def __init__(self , domain: str ) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self , tag: str , attrs: list[tuple[str, str | None]] ) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name(url: str ) -> str:
    """Returns the main domain name (example.com) of the url."""
    return ".".join(get_sub_domain_name(url ).split("." )[-2:] )


def get_sub_domain_name(url: str ) -> str:
    """Returns the sub-domain name (sub.example.com) of the url."""
    return parse.urlparse(url ).netloc
def emails_from_url(url: str = "https://github.com" ) -> list[str]:
    """Takes a url and returns all valid email addresses found on it."""
    # Get the base domain from the url
    domain = get_domain_name(url )

    # Initialize the parser
    parser = Parser(domain )

    try:
        # Open URL
        r = requests.get(url )

        # pass the raw HTML to the parser to get links
        parser.feed(r.text )

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
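
# Illustrative sketch (editor's example, no network access): the email regex
# used above, applied to a static snippet; it only matches addresses on the
# crawled domain.
def _example_email_regex() -> None:
    page = "contact: [email protected] or [email protected]"
    found = re.findall("[a-zA-Z0-9]+@" + "example.com" , page )
    assert found == ["[email protected]"]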
if __name__ == "__main__":
    emails = emails_from_url("https://github.com" )
    print(f"{len(emails )} emails found:" )
    print("\n".join(sorted(emails ) ) )
| 38 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=backbone_config , backbone_featmap_shape=self.backbone_featmap_shape , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = DPTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_depth_estimation(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )

    def create_and_check_for_semantic_segmentation(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )

    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self ):
        self.model_tester = DPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DPTConfig , has_text_modality=False , hidden_size=37 )

    def test_config(self ):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds" )
    def test_inputs_embeds(self ):
        pass

    def test_model_common_attributes(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )

    def test_forward_signature(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_depth_estimation(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )

    def test_for_semantic_segmentation(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    def test_training(self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()

    def test_training_gradient_checkpointing(self ):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_initialization(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self ):
        pass

    @slow
    def test_model_from_pretrained(self ):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_raise_readout_type(self ):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError ):
            _ = DPTForDepthEstimation(config )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase ):
    def test_inference_depth_estimation(self ):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(torch_device )

        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , expected_shape )

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(torch_device )

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , expected_slice , atol=1e-4 ) )
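
# Illustrative sketch (editor's example): typical post-processing for the
# predicted depth checked above -- resize the (batch, 384, 384) map back to
# the original image size and rescale it to a displayable range. The target
# size (480, 640) is an assumed example value.
def _example_depth_to_image(predicted_depth , size=(480, 640) ):
    depth = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1 ) , size=size , mode="bicubic" , align_corners=False )
    depth = depth.squeeze(1 )
    return depth * 255 / depth.max()  # normalize to 0-255 for visualization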
| 38 | 1 |
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list ) -> list:
    """Sorts a list using bucket sort; works for ints and floats."""
    if len(my_list ) == 0:
        return []
    min_value, max_value = min(my_list ), max(my_list )
    bucket_count = int(max_value - min_value ) + 1
    buckets: list[list] = [[] for _ in range(bucket_count )]

    for i in my_list:
        buckets[int(i - min_value )].append(i )

    return [v for bucket in buckets for v in sorted(bucket )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
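    # Illustrative sketch (editor's example): bucket_sort also handles floats;
    # each value lands in bucket int(value - min_value) and every bucket is
    # sorted with the built-in sort before concatenation.
    assert bucket_sort([0.4, 0.1, 1.2, 0.9]) == [0.1, 0.4, 0.9, 1.2]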
| 721 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self , degree: int , coefficients: MutableSequence[float] ) -> None:
        """The coefficients should be in order of degree, from smallest to largest."""
        if len(coefficients ) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1." )

        self.coefficients: list[float] = list(coefficients )
        self.degree = degree

    def __add__(self , polynomial_a: "Polynomial" ) -> "Polynomial":
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )

    def __sub__(self , polynomial_a: "Polynomial" ) -> "Polynomial":
        return self + polynomial_a * Polynomial(0 , [-1] )

    def __neg__(self ) -> "Polynomial":
        return Polynomial(self.degree , [-c for c in self.coefficients] )

    def __mul__(self , polynomial_a: "Polynomial" ) -> "Polynomial":
        coefficients = [0.0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )

    def evaluate(self , substitution: float ) -> float:
        result: float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self ) -> str:
        polynomial = ""
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )

        return polynomial

    def __repr__(self ) -> str:
        return self.__str__()

    def derivative(self ) -> "Polynomial":
        coefficients: list[float] = [0.0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )

    def integral(self , constant: float = 0 ) -> "Polynomial":
        coefficients: list[float] = [0.0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )

    def __eq__(self , polynomial_a: object ) -> bool:
        if not isinstance(polynomial_a , Polynomial ):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self , polynomial_a: object ) -> bool:
        return not self.__eq__(polynomial_a )
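
# Illustrative sketch (editor's example): exercising the class above.
# (1 + 2x) * (3 + x) = 3 + 7x + 2x^2, whose value at x = 1 is 12 and whose
# derivative is 7 + 4x.
def _example_polynomial() -> None:
    p = Polynomial(1 , [1, 2] )  # 1 + 2x
    q = Polynomial(1 , [3, 1] )  # 3 + x
    product = p * q
    assert product == Polynomial(2 , [3, 7, 2] )
    assert product.evaluate(1 ) == 12
    assert product.derivative() == Polynomial(1 , [7, 4] )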
| 412 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.17.0.dev0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class A_ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = field(
default="""tab_fact""" , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
SCREAMING_SNAKE_CASE_ = field(
default="""tab_fact""" , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} , )
SCREAMING_SNAKE_CASE_ = field(
default=1_024 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=A__ , metadata={"""help""": """Overwrite the cached preprocessed datasets or not."""} )
SCREAMING_SNAKE_CASE_ = field(
default=A__ , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=A__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=A__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=A__ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of prediction examples to this """
"""value if set."""
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=A__ , metadata={"""help""": """A csv or a json file containing the training data."""} )
SCREAMING_SNAKE_CASE_ = field(
default=A__ , metadata={"""help""": """A csv or a json file containing the validation data."""} )
SCREAMING_SNAKE_CASE_ = field(default=A__ , metadata={"""help""": """A csv or a json file containing the test data."""} )
def UpperCAmelCase__ ( self :List[Any] ):
"""simple docstring"""
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError('Need either a GLUE task, a training/validation file or a dataset name.' )
else:
lowerCamelCase__ : List[Any] =self.train_file.split('.' )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCamelCase__ : List[str] =self.validation_file.split('.' )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
    logger.info(f"Training/evaluation parameters {training_args}" )

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome." )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )

    # Set seed before initializing model.
    set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split("." )[-1]
                test_extension = data_args.test_file.split("." )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`." )

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}" )

        if data_args.train_file.endswith(".csv" ):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv" , data_files=data_files , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json" , data_files=data_files , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
    max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing" ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset" )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["train_samples"] = min(max_train_samples , len(train_dataset ) )

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train" , metrics )
        trainer.save_metrics("train" , metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )

        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["eval_samples"] = min(max_eval_samples , len(eval_dataset ) )

        trainer.log_metrics("eval" , metrics )
        trainer.save_metrics("eval" , metrics )

    if training_args.do_predict:
        logger.info("*** Predict ***" )
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label" )
        predictions = trainer.predict(predict_dataset , metric_key_prefix="predict" ).predictions
        predictions = np.argmax(predictions , axis=1 )

        output_predict_file = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , "w" ) as writer:
                logger.info("***** Predict Results *****" )
                writer.write("index\tprediction\n" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n" )

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
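
# Illustrative sketch (editor's example): the "#"-delimited table format that
# _convert_table_text_to_pandas inside preprocess_tabfact_function parses --
# the first row holds the column names, each following row one record.
def _example_table_text() -> None:
    table_text = "year#city\n2020#Tokyo\n2024#Paris"
    rows = [row.split("#" ) for row in table_text.strip("\n" ).split("\n" )]
    table = pd.DataFrame.from_records(rows[1:] , columns=rows[0] )
    assert list(table.columns ) == ["year", "city"]
    assert len(table ) == 2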
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
| 174 |
"""simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder

MODEL = "base_with_context"
def load_notes_encoder(weights , model ):
lowerCamelCase__ : Any =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
lowerCamelCase__ : int =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case_ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight["attention"]
lowerCamelCase__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ : str =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ : Tuple =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder(weights , model ):
lowerCamelCase__ : Tuple =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case_ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
lowerCamelCase__ : Dict =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ : List[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ : str =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ : Dict =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder(weights , model ):
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
lowerCamelCase__ : Any =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=snake_case_ )
lowerCamelCase__ : Tuple =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCamelCase__ : List[str] =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
lowerCamelCase__ : Optional[int] =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight["self_attention"]
lowerCamelCase__ : Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ : List[Any] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ : Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
lowerCamelCase__ : Any =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
lowerCamelCase__ : Tuple =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
lowerCamelCase__ : Any =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
lowerCamelCase__ : Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
lowerCamelCase__ : List[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
lowerCamelCase__ : int =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
lowerCamelCase__ : List[str] =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main(args ):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    t5_checkpoint = jnp.tree_util.tree_map(onp.array , t5_checkpoint )

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path , ".." , "config.gin" )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"] , notes_encoder )
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"] , continuous_encoder )
    decoder = load_decoder(t5_checkpoint["target"]["decoder"] , decoder )

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    if args.save:
        pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
    args = parser.parse_args()
main(args) | 174 | 1 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: returns 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Exhaustively checks the XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 718 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    """Image processor that resizes inputs down to the nearest multiple of ``size_divisor``."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)
    def resize(
        self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
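# Minimal usage sketch (illustrative, not from the original file; assumes a PIL image `img`):
#   processor = GLPNImageProcessor()
#   inputs = processor(images=img, return_tensors="np")  # BaseImageProcessor.__call__ dispatches to preprocess()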
| 394 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class ExampleDifferenceTests(unittest.TestCase):
    """Checks that each `complete_*` example script covers everything its `by_feature` counterparts do."""

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)
    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 223 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 303 | 0 |
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 702 | '''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sorts a list of non-negative integers using least-significant-digit radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
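# Worked example: radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
# returns [2, 24, 45, 66, 75, 90, 170, 802] (the input list is also sorted in place).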
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 145 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Maps each lowercase letter to its position in the alphabet (a=1 ... z=26)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of encode: maps alphabet positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
| 145 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 334 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--repo_path",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
"image_size": "sample_size",
"num_res_blocks": "layers_per_block",
"block_channels": "block_out_channels",
"down_blocks": "down_block_types",
"up_blocks": "up_block_types",
"downscale_freq_shift": "freq_shift",
"resnet_num_groups": "norm_num_groups",
"resnet_act_fn": "act_fn",
"resnet_eps": "norm_eps",
"num_head_channels": "attention_head_dim",
}
    key_parameters_to_change = {
"time_steps": "time_proj",
"mid": "mid_block",
"downsample_blocks": "down_blocks",
"upsample_blocks": "up_blocks",
}
__A : List[str] = "" if has_file(args.repo_path, "config.json") else "unet"
with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
__A : List[str] = reader.read()
__A : int = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, "config.json"):
__A : List[str] = UNetaDModel(**config)
else:
__A : Dict = UNetaDConditionModel if "ldm-text2im-large-256" in args.repo_path else UNetaDModel
__A : Optional[int] = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__A : List[Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__A : Optional[Any] = config[key]
del config[key]
__A : Dict = [k.replace("UNetRes", "") for k in config["down_block_types"]]
__A : Tuple = [k.replace("UNetRes", "") for k in config["up_block_types"]]
if do_only_weights:
__A : Dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
__A : Tuple = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
continue
__A : Dict = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(".")[0] == key:
__A : List[Any] = param_value
__A : Optional[Any] = True
if not has_changed:
__A : List[Any] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
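    # Illustrative invocation (the repo path below is a hypothetical example, not from the original script):
    #   python this_script.py --repo_path ./my-unet-checkpoint --dump_path ./converted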
| 334 | 1 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: returns 1 when both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Exhaustively checks the XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 135 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """
    Replaces the key by subtracting the offset from the original layer number
    """
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset

    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
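# Example (with offset 0): "network.1.0.mlp.fc1.weight" becomes "network.block.1.0.output.conv1.weight".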
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Loads the standard COCO test image used to verify the converted model."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original PoolFormer weights into our PoolFormer structure.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 43 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 680 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 680 | 1 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """
    Counts the perimeters p <= limit for which exactly one integer-sided
    right triangle exists.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
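# Math background: Euclid's formula (a, b, c) = (m^2 - n^2, 2mn, m^2 + n^2) with coprime
# m > n of opposite parity generates every primitive triple; its perimeter is 2m(m + n),
# which is why primitive_perimeter and the outer loop bound both use 2 * euclid_m * (euclid_m + euclid_n).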
if __name__ == "__main__":
print(F'{solution() = }')
| 167 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample=PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self, image: np.ndarray, size: Dict[str, int], resample=PILImageResampling.BICUBIC, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self, image: np.ndarray, size: Dict[str, int], data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float], data_format: Optional[ChannelDimension] = None, **kwargs
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 167 | 1 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
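# Like ImageFolder, AudioFolder infers class labels from the audio files' parent directory
# names unless `drop_labels` is set on the builder config.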
| 708 |
"""simple docstring"""
def method_1(boundary, steps):
    """Extended trapezoidal rule: approximates the integral of f over [boundary[0], boundary[1]]."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yields the interior sample points a+h, a+2h, ... strictly inside (a, b)."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
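# With f(x) = x^2 on [0, 1] and 10 steps, main() prints y ~= 0.335,
# close to the exact integral 1/3; more steps tighten the approximation.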
if __name__ == "__main__":
main()
| 612 | 0 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
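# The original checkpoints carry an auxiliary segmentation head (seg_head.aux_head.*) that has
# no counterpart in the Hugging Face model, so remove_unused_keys() drops those weights.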
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def _A ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
lowercase__ = get_mobilevitva_config(lowercase__ , lowercase__ )
# load original state_dict
lowercase__ = torch.load(lowercase__ , map_location="""cpu""" )
# load huggingface model
if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
lowercase__ = MobileViTVaForSemanticSegmentation(lowercase__ ).eval()
lowercase__ = False
else:
lowercase__ = MobileViTVaForImageClassification(lowercase__ ).eval()
lowercase__ = False
    # remove and rename some keys of the loaded original model
lowercase__ = checkpoint
remove_unused_keys(lowercase__ )
lowercase__ = create_rename_keys(lowercase__ , base_model=lowercase__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
# load modified state_dict
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowercase__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowercase__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowercase__ = model(**lowercase__ )
# verify classification model
if task_name.startswith("""imagenet""" ):
lowercase__ = outputs.logits
lowercase__ = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowercase__ = torch.tensor([-1.63_36e00, -7.32_04e-02, -5.18_83e-01] )
assert torch.allclose(logits[0, :3] , lowercase__ , atol=1e-4 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
__A = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
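    # Example invocation (script name and all paths are placeholders):
    #   python convert_mobilevitv2.py --task imagenet1k_256 \
    #       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
    #       --orig_config_path ./mobilevitv2.yaml \
    #       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256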
| 325 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=32 , lowerCamelCase__=3 , lowerCamelCase__=10 , lowerCamelCase__=[10, 20, 30, 40] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , ) -> List[str]:
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = embeddings_size
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_act
lowercase__ = num_labels
lowercase__ = scope
lowercase__ = len(lowerCamelCase__ )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Any:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = RegNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
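        # e.g. (illustrative): image_size == 32 yields a 1x1 final feature map, since
        # the backbone downsamples by a total stride of 32.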
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = RegNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
lowerCamelCase : Any = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : str = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : str = False
lowerCamelCase : Union[str, Any] = False
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = RegNetModelTester(self )
lowercase__ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Any:
'''simple docstring'''
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def A__ ( self ) -> str:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase__ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(lowerCamelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ = layer_type
lowercase__ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
            # check that output_hidden_states also works when set via config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = RegNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _A ( ):
lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> Dict:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase__ )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
lowercase__ = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 325 | 1 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowerCAmelCase__ = "\\n Text data.\n Second line of data."
lowerCAmelCase__ = "file"
@pytest.fixture(scope="session" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
UpperCAmelCase = bytes(UpperCAmelCase__ , "utf-8" )
with zstd.open(UpperCAmelCase__ , "wb" ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture
def _lowerCAmelCase( __A ):
with open(os.path.join(tmpfs.local_root_dir , UpperCAmelCase__ ) , "w" ) as f:
f.write(UpperCAmelCase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def _lowerCAmelCase( __A , __A , __A , __A , __A , __A ):
UpperCAmelCase = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
UpperCAmelCase = input_paths[compression_format]
UpperCAmelCase = tmp_path / "cache"
UpperCAmelCase = DownloadConfig(cache_dir=UpperCAmelCase__ , extract_compressed_file=UpperCAmelCase__ )
UpperCAmelCase = cached_path(UpperCAmelCase__ , download_config=UpperCAmelCase__ )
with open(UpperCAmelCase__ ) as f:
UpperCAmelCase = f.read()
with open(UpperCAmelCase__ ) as f:
UpperCAmelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = "custom_cache"
UpperCAmelCase = "custom_extracted_dir"
UpperCAmelCase = tmp_path / "custom_extracted_path"
if default_extracted:
UpperCAmelCase = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , UpperCAmelCase__ )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(UpperCAmelCase__ ) )
UpperCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
UpperCAmelCase = xz_file
UpperCAmelCase = (
DownloadConfig(extract_compressed_file=UpperCAmelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=UpperCAmelCase__ )
)
UpperCAmelCase = cached_path(UpperCAmelCase__ , download_config=UpperCAmelCase__ )
assert Path(UpperCAmelCase__ ).parent.parts[-2:] == expected
def _lowerCAmelCase( __A ):
# absolute path
UpperCAmelCase = str(Path(UpperCAmelCase__ ).resolve() )
assert cached_path(UpperCAmelCase__ ) == text_file
# relative path
UpperCAmelCase = str(Path(UpperCAmelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(UpperCAmelCase__ ) == text_file
def _lowerCAmelCase( __A ):
# absolute path
UpperCAmelCase = str(tmp_path.resolve() / "__missing_file__.txt" )
with pytest.raises(UpperCAmelCase__ ):
cached_path(UpperCAmelCase__ )
# relative path
UpperCAmelCase = "./__missing_file__.txt"
with pytest.raises(UpperCAmelCase__ ):
cached_path(UpperCAmelCase__ )
def _lowerCAmelCase( __A ):
UpperCAmelCase = get_from_cache(F"tmp://{tmpfs_file}" )
with open(UpperCAmelCase__ ) as f:
UpperCAmelCase = f.read()
assert output_file_content == FILE_CONTENT
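# Note (assumed): the tmpfs fixture registers a mock "tmp://" fsspec filesystem, so
# this test exercises get_from_cache's fsspec path and copies the file into the cache.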
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCAmelCase__ )
def _lowerCAmelCase( ):
with pytest.raises(UpperCAmelCase__ ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCAmelCase__ )
def _lowerCAmelCase( __A ):
UpperCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(UpperCAmelCase__ ):
http_get("https://huggingface.co" , temp_file=UpperCAmelCase__ )
with pytest.raises(UpperCAmelCase__ ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCAmelCase__ )
def _lowerCAmelCase( __A ):
UpperCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(UpperCAmelCase__ ):
ftp_get("ftp://huggingface.co" , temp_file=UpperCAmelCase__ )
with pytest.raises(UpperCAmelCase__ ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , UpperCAmelCase__ )
def _lowerCAmelCase( __A ):
UpperCAmelCase = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(UpperCAmelCase__ ):
fsspec_get("s3://huggingface.co" , temp_file=UpperCAmelCase__ )
with pytest.raises(UpperCAmelCase__ ):
fsspec_head("s3://huggingface.co" )
| 714 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
lowerCAmelCase__ = logging.getLogger(__name__)
lowerCAmelCase__ = 50 # max width of layer names
lowerCAmelCase__ = 70 # max width of quantizer names
def _lowerCAmelCase( __A ):
UpperCAmelCase = parser.add_argument_group("quant_trainer arguments" )
group.add_argument("--wprec" , type=__A , default=8 , help="weight precision" )
group.add_argument("--aprec" , type=__A , default=8 , help="activation precision" )
group.add_argument("--quant-per-tensor" , action="store_true" , help="per tensor weight scaling" )
group.add_argument("--quant-disable" , action="store_true" , help="disable all quantizers" )
group.add_argument("--quant-disable-embeddings" , action="store_true" , help="disable all embeddings quantizers" )
group.add_argument("--quant-disable-keyword" , type=__A , nargs="+" , help="disable quantizers by keyword" )
group.add_argument("--quant-disable-layer-module" , type=__A , help="disable quantizers by keyword under layer." )
group.add_argument("--quant-enable-layer-module" , type=__A , help="enable quantizers by keyword under layer" )
group.add_argument("--calibrator" , default="max" , help="which quantization range calibrator to use" )
group.add_argument("--percentile" , default=__A , type=__A , help="percentile for PercentileCalibrator" )
group.add_argument("--fuse-qkv" , action="store_true" , help="use the same scale factor for qkv" )
group.add_argument("--clip-gelu" , metavar="N" , type=__A , help="clip gelu output maximum value to N" )
group.add_argument(
"--recalibrate-weights" , action="store_true" , help=(
"recalibrate weight amaxes by taking the max of the weights."
" amaxes will be computed with the current quantization granularity (axis)."
) , )
def _lowerCAmelCase( __A ):
if args.calibrator == "max":
UpperCAmelCase = "max"
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("Specify --percentile when using percentile calibrator" )
UpperCAmelCase = "histogram"
elif args.calibrator == "mse":
UpperCAmelCase = "histogram"
else:
raise ValueError(F"Invalid calibrator {args.calibrator}" )
UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=__A )
UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__A )
quant_nn.QuantLinear.set_default_quant_desc_weight(__A )
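    # Note (descriptive, not from the source): axis=(0,) yields one amax per output
    # channel (per-channel weight scaling), while axis=None, selected via
    # --quant-per-tensor, uses a single scale for the whole weight tensor.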
def _lowerCAmelCase( __A , __A , __A=False , __A=False ):
logger.info("Configuring Model for Quantization" )
logger.info(F"using quantization package {pytorch_quantization.__file__}" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__A , ["embeddings"] , which="weight" , _disabled=__A )
if args.quant_disable:
set_quantizer_by_name(__A , [""] , _disabled=__A )
if args.quant_disable_keyword:
set_quantizer_by_name(__A , args.quant_disable_keyword , _disabled=__A )
if args.quant_disable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_disable_layer_module] , _disabled=__A )
if args.quant_enable_layer_module:
set_quantizer_by_name(__A , [r"layer.\d+." + args.quant_enable_layer_module] , _disabled=__A )
if args.recalibrate_weights:
recalibrate_weights(__A )
if args.fuse_qkv:
fuse_qkv(__A , __A )
if args.clip_gelu:
clip_gelu(__A , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__A )
def _lowerCAmelCase( __A ):
logger.info("Enabling Calibration" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"{name:80}: {module}" )
def _lowerCAmelCase( __A , __A ):
logger.info("Loading calibrated amax" )
for name, module in model.named_modules():
if name.endswith("_quantizer" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("percentile" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__A )
def _lowerCAmelCase( __A , __A ):
def fusea(__A , __A , __A ):
for mod in [qq, qk, qv]:
if not hasattr(__A , "_amax" ):
print(" WARNING: NO AMAX BUFFER" )
return
UpperCAmelCase = qq._amax.detach().item()
UpperCAmelCase = qk._amax.detach().item()
UpperCAmelCase = qv._amax.detach().item()
UpperCAmelCase = max(__A , __A , __A )
qq._amax.fill_(__A )
qk._amax.fill_(__A )
qv._amax.fill_(__A )
logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" )
for name, mod in model.named_modules():
if name.endswith(".attention.self" ):
logger.info(F"FUSE_QKV: {name:{name_width}}" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
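# Rationale (assumed, matching the --fuse-qkv help text): sharing one amax across the
# Q/K/V quantizers lets downstream runtimes such as TensorRT fuse the three
# projections into a single quantized GEMM without rescaling between them.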
def _lowerCAmelCase( __A , __A ):
for name, mod in model.named_modules():
if name.endswith(".output.dense" ) and not name.endswith("attention.output.dense" ):
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__A )
UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ) and mod._weight_quantizer.axis is not None:
UpperCAmelCase = mod.weight.shape[0]
UpperCAmelCase = mod._weight_quantizer._amax.detach()
UpperCAmelCase = torch.ones(__A , dtype=amax.dtype , device=amax.device ) * amax
print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" )
def _lowerCAmelCase( __A ):
for name, mod in model.named_modules():
if hasattr(__A , "_weight_quantizer" ):
            if not hasattr(mod._weight_quantizer , "_amax" ):
                print(F"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__A , keepdims=__A ).detach()
logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" )
UpperCAmelCase = amax
def _lowerCAmelCase( __A , __A=25 , __A=180 , __A=None ):
if ignore is None:
UpperCAmelCase = []
elif not isinstance(__A , __A ):
UpperCAmelCase = [ignore]
UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(__A , "weight" ):
continue
UpperCAmelCase = max(__A , len(__A ) )
for name, mod in model.named_modules():
UpperCAmelCase = getattr(__A , "_input_quantizer" , __A )
UpperCAmelCase = getattr(__A , "_weight_quantizer" , __A )
if not hasattr(__A , "weight" ):
continue
if type(__A ) in ignore:
continue
if [True for s in ignore if type(__A ) is str and s in name]:
continue
UpperCAmelCase = F"Act:{input_q.extra_repr()}"
UpperCAmelCase = F"Wgt:{weight_q.extra_repr()}"
UpperCAmelCase = F"{name:{name_width}} {act_str} {wgt_str}"
if len(__A ) <= line_width:
logger.info(__A )
else:
logger.info(F"{name:{name_width}} {act_str}" )
logger.info(F"{' ':{name_width}} {wgt_str}" )
def _lowerCAmelCase( __A ):
UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(__A , pytorch_quantization.nn.TensorQuantizer ):
print(F"{name:80} {mod}" )
count += 1
print(F"{count} TensorQuantizers found in model" )
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase = getattr(__A , __A , __A )
if quantizer_mod is not None:
assert hasattr(__A , __A )
setattr(__A , __A , __A )
else:
logger.warning(F"{name} has no {quantizer}" )
def _lowerCAmelCase( __A , __A , __A="both" , **__A ):
UpperCAmelCase = F"Warning: changing {which} quantizers of {name:{qname_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
if which in ["input", "both"]:
set_quantizer(__A , __A , "_input_quantizer" , __A , __A )
if which in ["weight", "both"]:
set_quantizer(__A , __A , "_weight_quantizer" , __A , __A )
logger.info(__A )
def _lowerCAmelCase( __A , __A , **__A ):
for name, mod in model.named_modules():
if hasattr(__A , "_input_quantizer" ) or hasattr(__A , "_weight_quantizer" ):
for n in names:
if re.search(__A , __A ):
set_quantizers(__A , __A , **__A )
elif name.endswith("_quantizer" ):
for n in names:
if re.search(__A , __A ):
UpperCAmelCase = F"Warning: changing {name:{name_width}}"
for k, v in kwargs.items():
s += F" {k}={v}"
setattr(__A , __A , __A )
logger.info(__A )
| 1 | 0 |
# Lint as: python3
import itertools
import os
import re
__SCREAMING_SNAKE_CASE = re.compile(r'([A-Z]+)([A-Z][a-z])')
__SCREAMING_SNAKE_CASE = re.compile(r'([a-z\d])([A-Z])')
__SCREAMING_SNAKE_CASE = re.compile(r'(?<!_)_(?!_)')
__SCREAMING_SNAKE_CASE = re.compile(r'(_{2,})')
__SCREAMING_SNAKE_CASE = r'^\w+(\.\w+)*$'
__SCREAMING_SNAKE_CASE = r'<>:/\|?*'
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any =_uppercase_uppercase_re.sub(r'\1_\2' ,lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] =_lowercase_uppercase_re.sub(r'\1_\2' ,lowerCAmelCase_ )
return name.lower()
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any =_single_underscore_re.split(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =[_multiple_underscores_re.split(lowerCAmelCase_ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCAmelCase_ ) if n != '' )
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
if os.path.basename(lowerCAmelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Any ,lowerCAmelCase_ : List[str] ) -> List[Any]:
"""simple docstring"""
if os.path.basename(lowerCAmelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re ,lowerCAmelCase_ ):
raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
return F"""{filename_prefix_for_name(lowerCAmelCase_ )}-{split}"""
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : List[Any] ,lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Optional[Any] ,lowerCAmelCase_ : Tuple=None ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] =filename_prefix_for_split(lowerCAmelCase_ ,lowerCAmelCase_ )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
SCREAMING_SNAKE_CASE_ : Optional[int] =os.path.join(lowerCAmelCase_ ,lowerCAmelCase_ )
return F"""{filepath}*"""
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : Tuple ,lowerCAmelCase_ : Optional[int] ,lowerCAmelCase_ : Union[str, Any] ,lowerCAmelCase_ : List[str]=None ,lowerCAmelCase_ : Tuple=None ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] =filename_prefix_for_split(lowerCAmelCase_ ,lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Any =os.path.join(lowerCAmelCase_ ,lowerCAmelCase_ )
if shard_lengths:
SCREAMING_SNAKE_CASE_ : str =len(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] =[F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(lowerCAmelCase_ )]
if filetype_suffix:
SCREAMING_SNAKE_CASE_ : List[Any] =[filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
SCREAMING_SNAKE_CASE_ : List[str] =prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
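# Illustrative (hypothetical inputs): path="/data", name="squad_v2", split="train",
# filetype_suffix="arrow" and two shard_lengths produce
#   ["/data/squad_v2-train-00000-of-00002.arrow",
#    "/data/squad_v2-train-00001-of-00002.arrow"]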
| 220 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = '▁'
__SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
__SCREAMING_SNAKE_CASE = {
'facebook/mbart-large-50-one-to-many-mmt': 1024,
}
# fmt: off
__SCREAMING_SNAKE_CASE = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = ['input_ids', 'attention_mask']
_lowercase = []
_lowercase = []
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="</s>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="<mask>" , __UpperCAmelCase = None , **__UpperCAmelCase , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
SCREAMING_SNAKE_CASE_ : Tuple =AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE_ : str ={} if sp_model_kwargs is None else sp_model_kwargs
SCREAMING_SNAKE_CASE_ : Dict =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
SCREAMING_SNAKE_CASE_ : Optional[int] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCAmelCase ) )
SCREAMING_SNAKE_CASE_ : List[Any] =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE_ : Tuple ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE_ : str =1
SCREAMING_SNAKE_CASE_ : Tuple =len(self.sp_model )
SCREAMING_SNAKE_CASE_ : Tuple ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCAmelCase )
}
SCREAMING_SNAKE_CASE_ : List[str] ={v: k for k, v in self.lang_code_to_id.items()}
SCREAMING_SNAKE_CASE_ : int =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
SCREAMING_SNAKE_CASE_ : List[Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
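        # Illustrative: with fairseq_offset == 1, spm id 3 ("," in the table above)
        # maps to fairseq id 4; ids 0-3 are served by the hand-written
        # fairseq_tokens_to_ids table instead.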
SCREAMING_SNAKE_CASE_ : Optional[Any] =src_lang if src_lang is not None else 'en_XX'
SCREAMING_SNAKE_CASE_ : Any =self.lang_code_to_id[self._src_lang]
SCREAMING_SNAKE_CASE_ : List[Any] =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __lowerCamelCase ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __lowerCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] =self.__dict__.copy()
SCREAMING_SNAKE_CASE_ : Any =None
return state
def __setstate__( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ : Any ={}
SCREAMING_SNAKE_CASE_ : List[str] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCamelCase ( self ):
SCREAMING_SNAKE_CASE_ : List[str] ={self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCamelCase ( self , __UpperCAmelCase ):
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE_ : List[Any] =self.sp_model.PieceToId(__UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __lowerCamelCase ( self , __UpperCAmelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any =[]
SCREAMING_SNAKE_CASE_ : str =''
SCREAMING_SNAKE_CASE_ : List[str] =False
for token in tokens:
            # make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
SCREAMING_SNAKE_CASE_ : Dict =True
SCREAMING_SNAKE_CASE_ : Dict =[]
else:
current_sub_tokens.append(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str =False
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE_ : str =os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,)
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Dict =[1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE_ : List[Any] =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones
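        # Illustrative: for a single n-token sequence the mask is [1] + [0] * n + [1],
        # i.e. one leading language-code token and one trailing </s>, matching
        # set_src_lang_special_tokens below.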
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
SCREAMING_SNAKE_CASE_ : List[str] =src_lang
SCREAMING_SNAKE_CASE_ : Dict =self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int =self.convert_tokens_to_ids(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str =tgt_lang_id
return inputs
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = "en_XX" , __UpperCAmelCase = None , __UpperCAmelCase = "ro_RO" , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Dict =src_lang
SCREAMING_SNAKE_CASE_ : List[Any] =tgt_lang
return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __lowerCamelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Optional[int] =self.lang_code_to_id[src_lang]
SCREAMING_SNAKE_CASE_ : List[Any] =[self.cur_lang_code_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] =[self.eos_token_id]
def __lowerCamelCase ( self , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Any =self.lang_code_to_id[tgt_lang]
SCREAMING_SNAKE_CASE_ : int =[self.cur_lang_code_id]
SCREAMING_SNAKE_CASE_ : Optional[Any] =[self.eos_token_id]
| 220 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
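    # With the lazy structure above, `from transformers import PegasusXModel` defers
    # the torch-dependent import until first attribute access (assumed _LazyModule
    # behavior).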
| 715 | import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase_ ( UpperCamelCase__ : List[str], UpperCamelCase__ : Any ):
'''simple docstring'''
UpperCamelCase__ = torch.load(UpperCamelCase__, map_location='''cpu''' )
UpperCamelCase__ = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCamelCase__ = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCamelCase__ = v
else:
UpperCamelCase__ = v
UpperCamelCase__ = chkpt['''params''']
UpperCamelCase__ = {n: v for n, v in config.items() if not isinstance(UpperCamelCase__, (torch.FloatTensor, numpy.ndarray) )}
UpperCamelCase__ = chkpt['''dico_word2id''']
UpperCamelCase__ = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''', '''''' ): i for s, i in vocab.items()}
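    # Illustrative: subword pieces drop their "@@" marker ("hel@@" -> "hel"), while
    # full words past the first 14 special entries gain the "</w>" end-of-word
    # suffix ("hello" -> "hello</w>").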
# Save pytorch-model
UpperCamelCase__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCamelCase__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCamelCase__ = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(UpperCamelCase__, UpperCamelCase__ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(UpperCamelCase__, indent=2 ) + '''\n''' )
print(F"""Save vocab file to {pytorch_config_dump_path}""" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(UpperCamelCase__, indent=2 ) + '''\n''' )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 591 | 0 |