"""Fetch book data from the Open Library API and print a summary for a given ISBN."""
from json import JSONDecodeError  # Workaround for requests.exceptions.JSONDecodeError

import requests


def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Return the Open Library JSON record for an id such as 'isbn/0140328726'."""
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.JSONDecodeError
            print(f"Sorry, there are no results for ISBN: {isbn}.")
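# A minimal non-interactive usage sketch (network access required; the ISBN below
# is Open Library's record for "Matilda", used as the function's default):
#
#   book = summarize_book(get_openlibrary_data("isbn/0140328726"))
#   print(book["Title"])  # -> Matilda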
"""Project Euler problem 89: rewrite Roman numerals in minimal form and count the
characters saved (https://projecteuler.net/problem=89)."""
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string into its integer value."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:  # subtractive pair such as IV or CM
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert an integer into its minimal-form Roman numeral."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the total characters saved by rewriting each numeral in the file minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
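# Illustrative round trip (values chosen for this sketch):
#
#   parse_roman_numerals("MMMCLIX")   # -> 3159
#   generate_roman_numerals(3159)     # -> "MMMCLIX"
#   generate_roman_numerals(parse_roman_numerals("XIIII"))  # -> "XIV", saving 2 chars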
"""Convert LeViT checkpoints from the timm library to the Hugging Face format."""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path

import timm
import torch
from huggingface_hub import hf_hub_download

from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes, name, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")
    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="The name of the model you wish to convert; it must be one of the supported LeViT* architectures.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="levit-dump-folder/",
        type=Path,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    parser.add_argument(
        "--no-push_to_hub",
        dest="push_to_hub",
        action="store_false",
        help="Do not push model and image processor to the hub",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
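# Example invocation, assuming this script is saved under its name in the
# transformers repository (convert_levit_timm_to_pytorch.py); the dump folder
# name is arbitrary:
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub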
"""Convert an LXMERT TensorFlow checkpoint to a PyTorch model."""
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
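# Example invocation (the checkpoint and config paths below are hypothetical):
#
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert_ckpt/model.ckpt \
#       --config_file ./lxmert_ckpt/config.json \
#       --pytorch_dump_path ./lxmert_pytorch.bin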
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class a ( unittest.TestCase ):
def UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
lowerCamelCase_ = tempfile.mkdtemp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'的',
'价',
'格',
'是',
'15',
'便',
'alex',
'##andra',
',',
'。',
'-',
't',
'shirt',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCamelCase_ = {
'do_resize': True,
'size': {'height': 224, 'width': 224},
'do_center_crop': True,
'crop_size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
'do_convert_rgb': True,
}
lowerCamelCase_ = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Tuple , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
return BertTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Any ) -> List[str]:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Any , **__SCREAMING_SNAKE_CASE : Dict ) -> Optional[Any]:
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Optional[Any] ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase_ = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self : str ) -> List[Any]:
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : str ) -> Tuple:
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' )
lowerCamelCase_ = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=__SCREAMING_SNAKE_CASE )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : List[Any] ) -> Dict:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='np' )
lowerCamelCase_ = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self : Tuple ) -> Dict:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。'
lowerCamelCase_ = processor(text=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self : Tuple ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ = processor.batch_decode(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Dict ) -> int:
lowerCamelCase_ = self.get_image_processor()
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。'
lowerCamelCase_ = self.prepare_image_inputs()
lowerCamelCase_ = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a ( __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : List[str] = MobileBertTokenizer
SCREAMING_SNAKE_CASE : int = MobileBertTokenizerFast
SCREAMING_SNAKE_CASE : Union[str, Any] = True
SCREAMING_SNAKE_CASE : Optional[Any] = True
SCREAMING_SNAKE_CASE : Dict = filter_non_english
SCREAMING_SNAKE_CASE : str = """google/mobilebert-uncased"""
def UpperCamelCase ( self : List[str] ) -> Dict:
super().setUp()
lowerCamelCase_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
lowerCamelCase_ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int:
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = 'unwanted, running'
return input_text, output_text
def UpperCamelCase ( self : Dict ) -> Any:
lowerCamelCase_ = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self : Dict ) -> Dict:
if not self.test_rust_tokenizer:
return
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# With lower casing
lowerCamelCase_ = self.get_tokenizer(do_lower_case=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.get_rust_tokenizer(do_lower_case=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = 'UNwant\u00E9d,running'
lowerCamelCase_ = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.get_rust_tokenizer()
lowerCamelCase_ = tokenizer.encode(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = rust_tokenizer.encode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : List[str] ) -> str:
lowerCamelCase_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCamelCase ( self : List[Any] ) -> Optional[int]:
lowerCamelCase_ = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase ( self : Any ) -> Any:
lowerCamelCase_ = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
lowerCamelCase_ = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase ( self : Tuple ) -> List[str]:
lowerCamelCase_ = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def UpperCamelCase ( self : Optional[int] ) -> Any:
lowerCamelCase_ = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self : Tuple ) -> Tuple:
lowerCamelCase_ = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self : List[str] ) -> List[Any]:
lowerCamelCase_ = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , strip_accents=__SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def UpperCamelCase ( self : Tuple ) -> str:
lowerCamelCase_ = BasicTokenizer(do_lower_case=__SCREAMING_SNAKE_CASE , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def UpperCamelCase ( self : List[str] ) -> Any:
lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
lowerCamelCase_ = {}
for i, token in enumerate(__SCREAMING_SNAKE_CASE ):
lowerCamelCase_ = i
lowerCamelCase_ = WordpieceTokenizer(vocab=__SCREAMING_SNAKE_CASE , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def UpperCamelCase ( self : List[Any] ) -> Any:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def UpperCamelCase ( self : Union[str, Any] ) -> int:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def UpperCamelCase ( self : str ) -> Optional[Any]:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def UpperCamelCase ( self : int ) -> List[Any]:
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
@slow
def UpperCamelCase ( self : Dict ) -> List[str]:
lowerCamelCase_ = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' )
lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
lowerCamelCase_ = tokenizer_r.encode_plus(
__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , )
lowerCamelCase_ = tokenizer_r.do_lower_case if hasattr(__SCREAMING_SNAKE_CASE , 'do_lower_case' ) else False
lowerCamelCase_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def UpperCamelCase ( self : Any ) -> List[Any]:
lowerCamelCase_ = ['的', '人', '有']
lowerCamelCase_ = ''.join(__SCREAMING_SNAKE_CASE )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase_ = True
lowerCamelCase_ = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer_p.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer_r.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = False
lowerCamelCase_ = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer_r.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer_p.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer_r.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer_p.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE )
# it is expected that only the first Chinese character is not preceded by "##".
lowerCamelCase_ = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__SCREAMING_SNAKE_CASE )
]
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
"""Compute the equivalent resistance of resistors in parallel and in series."""
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn) for resistors in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """Req = R1 + R2 + ... + Rn for resistors in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
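# Illustrative values (chosen for this sketch): two 4-ohm and one 2-ohm resistor.
#
#   resistor_parallel([4.0, 4.0, 2.0])  # -> 1.0  (1 / (0.25 + 0.25 + 0.5))
#   resistor_series([4.0, 4.0, 2.0])    # -> 10.0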
"""Export a BART model with beam search to an ONNX graph and validate the export."""
import argparse
import logging
import os
import sys

import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers

import transformers
from transformers import BartForConditionalGeneration, BartTokenizer


logging.basicConfig(
    format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=os.environ.get("LOGLEVEL", "INFO").upper(),
    stream=sys.stdout,
)

logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
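# Example invocation, assuming this script is saved as run_onnx_exporter.py
# (only facebook/bart-base is wired into model_dict above):
#
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path BART.onnx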
"""Fast tokenizer class for DistilBERT (backed by the tokenizers library)."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
        ),
        "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
        "distilbert-base-uncased-distilled-squad": (
            "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
        "distilbert-base-cased-distilled-squad": (
            "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
        ),
        "distilbert-base-german-cased": (
            "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
        ),
        "distilbert-base-multilingual-cased": (
            "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}


class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding [CLS]/[SEP] around one or two sequences."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Create the token-type-id mask (0 for the first sequence, 1 for the second)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
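# Minimal usage sketch (requires network access to the Hugging Face Hub; the ids
# shown come from the uncased BERT vocabulary that DistilBERT reuses):
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tokenizer("Hello world")["input_ids"]  # -> [101, 7592, 2088, 102]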
"""HfArgumentParser: an argparse wrapper that generates CLI arguments from dataclass fields."""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints

import yaml


DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map a raw CLI string back onto the matching choice value."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
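# Minimal usage sketch: CLI flags are generated from dataclass fields. The
# dataclass below is invented for illustration.
#
#   @dataclasses.dataclass
#   class ExampleArguments:
#       learning_rate: float = 5e-5
#       do_train: bool = False
#
#   parser = HfArgumentParser(ExampleArguments)
#   (example_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-4", "--do_train"])
#   assert example_args.learning_rate == 1e-4 and example_args.do_train is True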
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : List[str] = get_tests_dir("fixtures/test_sentencepiece.model")
__UpperCAmelCase : Tuple = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
__UpperCAmelCase : str = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CamembertTokenizer
lowerCAmelCase__ = CamembertTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : List[str] ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case: Union[str, Any] = CamembertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = """<pad>"""
__snake_case: int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1_004 )
def UpperCAmelCase__ ( self : List[str] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
def UpperCAmelCase__ ( self : List[str] ):
# fmt: off
__snake_case: Tuple = {"""input_ids""": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__snake_case: Optional[Any] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=A , )
| 352 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
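# Note: mbart's shift_tokens_right does not prepend a fixed decoder_start_token_id; it
# shifts everything right and wraps the last non-pad token (eos) around to position 0,
# which is why decoder_input_ids below are expected to start with [2, RO_CODE].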
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case: Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Tuple = tokenizer_r.save_pretrained(A )
__snake_case: Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: Tuple = tokenizer_r.from_pretrained(A )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
__snake_case: Tuple = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: List[str] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: List[Any] = tokenizer_r.from_pretrained(A )
__snake_case: Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: Dict = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: Any = tokenizer_r.from_pretrained(A )
__snake_case: Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """facebook/mbart-large-50-one-to-many-mmt"""
lowerCAmelCase__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCAmelCase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_XX"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_053, 250_001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            },
        )
| 293 | 0 |
import os
def solution():
    """Return the maximum top-to-bottom path sum through the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # Dynamic programming over rows: each cell accumulates the best path sum reaching it.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
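# Worked example: for the triangle [[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]] the rows
# accumulate to [[3], [10, 7], [12, 14, 13], [20, 19, 23, 16]], giving a maximum of 23.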
if __name__ == "__main__":
print(solution())
| 114 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    """Configuration class for MVP models; follows the BART-style encoder-decoder layout."""

    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=50267, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, use_cache=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2, use_prompt=False, prompt_length=100, prompt_mid_dim=800, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
| 114 | 1 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
def UpperCAmelCase(self : Any ) -> Optional[Any]:
super().setup()
snake_case = nn.Dense(5 , dtype=self.dtype )
def __call__(self : Optional[Any] , *_A : Optional[Any] , **_A : Dict ) -> str:
snake_case = super().__call__(*_A , **_A )
snake_case = self.cls(outputs[2] )
return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
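# Natural Questions training optimizes three heads jointly: the loss below averages the
# cross-entropy of the start-position, end-position, and answer-category (pooled) logits.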
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        """One-hot cross entropy, optionally reduced (e.g. with jnp.mean)."""
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs
    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch
    def collate_fn(self, features: dict) -> dict:
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch
    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)
    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
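# Epoch iterator: optionally reshuffles the dataset, then yields contiguous fixed-size
# batches; the trailing partial batch is dropped so shapes stay static for pmap/shard.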
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap , axis_name="batch" )
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
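# Evaluation step: same forward pass and joint loss as training, but without gradients
# or optimizer updates; the loss is still averaged across devices with pmean.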
@partial(jax.pmap , axis_name="batch" )
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
def UpperCAmelCase(self : Union[str, Any] , _A : Dict , _A : Any , _A : str , _A : Any=None ) -> List[Any]:
snake_case = model.params
snake_case = TrainState.create(
apply_fn=model.__call__ , params=_A , tx=_A , loss_fn=_A , )
if ckpt_dir is not None:
snake_case , snake_case , snake_case , snake_case , snake_case = restore_checkpoint(_A , _A )
snake_case = {
"lr": args.lr,
"init_lr": args.init_lr,
"warmup_steps": args.warmup_steps,
"num_train_steps": num_train_steps,
"weight_decay": args.weight_decay,
}
snake_case , snake_case = build_tx(**_A )
snake_case = train_state.TrainState(
step=_A , apply_fn=model.__call__ , params=_A , tx=_A , opt_state=_A , )
snake_case = args
snake_case = data_collator
snake_case = lr
snake_case = params
snake_case = jax_utils.replicate(_A )
return state
def UpperCAmelCase(self : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : int ) -> int:
snake_case = self.args
snake_case = len(_A ) // args.batch_size
snake_case = jax.random.PRNGKey(0 )
snake_case = jax.random.split(_A , jax.device_count() )
for epoch in range(args.max_epochs ):
            running_loss = jnp.array(0, dtype=jnp.float32)
snake_case = get_batched_dataset(_A , args.batch_size , seed=_A )
snake_case = 0
for batch in tqdm(_A , total=_A , desc=f'Running EPOCH-{epoch}' ):
snake_case = self.data_collator(_A )
snake_case , snake_case , snake_case = self.train_step_fn(_A , _A , **_A )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
if i % args.logging_steps == 0:
snake_case = jax_utils.unreplicate(state.step )
snake_case = running_loss.item() / i
snake_case = self.scheduler_fn(state_step - 1 )
snake_case = self.evaluate(_A , _A )
snake_case = {
"step": state_step.item(),
"eval_loss": eval_loss.item(),
"tr_loss": tr_loss,
"lr": lr.item(),
}
tqdm.write(str(_A ) )
self.logger.log(_A , commit=_A )
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=_A )
def UpperCAmelCase(self : Union[str, Any] , _A : Dict , _A : List[str] ) -> Tuple:
snake_case = get_batched_dataset(_A , self.args.batch_size )
snake_case = len(_A ) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
snake_case = 0
for batch in tqdm(_A , total=_A , desc="Evaluating ... " ):
snake_case = self.data_collator(_A )
snake_case = self.val_step_fn(_A , **_A )
running_loss += jax_utils.unreplicate(metrics["loss"] )
i += 1
return running_loss / i
def UpperCAmelCase(self : Optional[int] , _A : Optional[int] , _A : Union[str, Any] ) -> Optional[Any]:
snake_case = jax_utils.unreplicate(_A )
print(f'SAVING CHECKPOINT IN {save_dir}' , end=" ... " )
self.model_save_fn(_A , params=state.params )
with open(os.path.join(_A , "opt_state.msgpack" ) , "wb" ) as f:
f.write(to_bytes(state.opt_state ) )
joblib.dump(self.args , os.path.join(_A , "args.joblib" ) )
joblib.dump(self.data_collator , os.path.join(_A , "data_collator.joblib" ) )
with open(os.path.join(_A , "training_state.json" ) , "w" ) as f:
json.dump({"step": state.step.item()} , _A )
print("DONE" )
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
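# Learning-rate schedule: linear warmup from init_lr to lr over warmup_steps, then a
# linear decay toward zero (1e-7) for the remaining training steps.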
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        # Exclude biases and LayerNorm scale parameters from weight decay.
        flat_params = traverse_util.flatten_dict(params)
        flat_mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in flat_params}
        return traverse_util.unflatten_dict(flat_mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
| 137 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    """Run `fun` on `obj`, returning (result, None) on success or (None, exception) on failure."""
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
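# Each scenario below is a list of (operation, *args) descriptors that gets replayed
# against both HashMap and the built-in dict; the two must behave identically.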
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    """The public interface of HashMap should be a strict subset of dict's."""

    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 137 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(self, batch_size: int = 1, generator: Optional[torch.Generator] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin the loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            # The trailing string marks this as the local custom-pipeline test fixture.
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
| 161 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
def lowercase_ ( self :int ) -> Tuple:
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_input_lengths:
__A = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A = ids_tensor([self.batch_size] , 2 ).float()
__A = ids_tensor([self.batch_size] , self.num_choices )
__A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowercase_ ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowercase_ ( self :str , _A :Optional[int] , _A :Dict , _A :Union[str, Any] , _A :List[Any] , _A :str , _A :Union[str, Any] , _A :Optional[Any] , _A :List[str] , _A :Dict , ) -> Any:
'''simple docstring'''
__A = XLMModel(config=_A )
model.to(_A )
model.eval()
__A = model(_A , lengths=_A , langs=_A )
__A = model(_A , langs=_A )
__A = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self :int , _A :List[Any] , _A :List[str] , _A :List[Any] , _A :int , _A :Optional[int] , _A :Optional[Any] , _A :Dict , _A :List[Any] , _A :List[Any] , ) -> List[Any]:
'''simple docstring'''
__A = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
__A = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self :Union[str, Any] , _A :str , _A :List[str] , _A :Union[str, Any] , _A :str , _A :Any , _A :Dict , _A :Any , _A :Union[str, Any] , _A :Optional[Any] , ) -> int:
'''simple docstring'''
__A = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
__A = model(_A )
__A = model(_A , start_positions=_A , end_positions=_A )
__A = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self :Union[str, Any] , _A :Any , _A :Union[str, Any] , _A :str , _A :Dict , _A :Optional[Any] , _A :Union[str, Any] , _A :List[str] , _A :str , _A :Optional[Any] , ) -> int:
'''simple docstring'''
__A = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
__A = model(_A )
__A = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
__A = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((__A) , ) = result_with_labels.to_tuple()
__A = model(_A , start_positions=_A , end_positions=_A )
((__A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowercase_ ( self :Optional[int] , _A :Optional[Any] , _A :Optional[int] , _A :List[Any] , _A :int , _A :Tuple , _A :Union[str, Any] , _A :List[Any] , _A :List[str] , _A :Dict , ) -> str:
'''simple docstring'''
__A = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
__A = model(_A )
__A = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase_ ( self :Optional[int] , _A :str , _A :List[str] , _A :Union[str, Any] , _A :Dict , _A :int , _A :Dict , _A :Union[str, Any] , _A :int , _A :Optional[Any] , ) -> List[str]:
'''simple docstring'''
__A = self.num_labels
__A = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
__A = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self :List[str] , _A :Optional[Any] , _A :List[str] , _A :List[Any] , _A :Union[str, Any] , _A :Any , _A :List[str] , _A :Optional[Any] , _A :Any , _A :Tuple , ) -> List[Any]:
'''simple docstring'''
__A = self.num_choices
__A = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
__A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
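    # Note: `inputs_dict` carries `lengths` instead of an attention mask; XLM can build
    # its padding mask internally from per-sequence lengths.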
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase):
UpperCAmelCase__ : Optional[int] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Dict = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : List[Any] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase_ ( self :int , _A :int , _A :Optional[Any] , _A :Dict , _A :List[Any] , _A :str ) -> str:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowercase_ ( self :int , _A :Optional[Any] , _A :Dict , _A :Optional[int]=False ) -> List[Any]:
'''simple docstring'''
__A = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__A = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
__A = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def lowercase_ ( self :Optional[int] ) -> Any:
'''simple docstring'''
__A = XLMModelTester(self )
__A = ConfigTester(self , config_class=_A , emb_dim=37 )
def lowercase_ ( self :Dict ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self :List[Any] ) -> Any:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def lowercase_ ( self :str ) -> List[Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def lowercase_ ( self :Any ) -> Tuple:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def lowercase_ ( self :str ) -> str:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def lowercase_ ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def lowercase_ ( self :List[str] ) -> Optional[Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def lowercase_ ( self :Any ) -> Union[str, Any]:
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def lowercase_ ( self :Any , _A :str , _A :str , _A :int , _A :Optional[int] , _A :Any , _A :List[Any]=False , _A :Dict=1 ) -> Optional[int]:
'''simple docstring'''
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
__A = min_length + idx + 1
__A = min_length + idx + 1
__A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def lowercase_ ( self :Optional[Any] , _A :str , _A :List[Any] , _A :str , _A :str , _A :int , _A :Union[str, Any]=False , _A :Optional[Any]=1 ) -> Dict:
'''simple docstring'''
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
__A = min_length + idx + 1
__A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
@slow
def lowercase_ ( self :int ) -> Tuple:
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class UpperCamelCase__ ( unittest.TestCase):
@slow
def lowercase_ ( self :int ) -> str:
'''simple docstring'''
__A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(_A )
__A = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
__A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__A = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
| 161 | 1 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")
    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1_103)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_103)
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase : List[Any] = (
"""Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
""" </s> <pad> <pad> <pad>"""
)
_UpperCAmelCase : Tuple = rust_tokenizer([raw_input_str] ,return_tensors=a_ ,add_special_tokens=a_ ).input_ids[0]
_UpperCAmelCase : str = py_tokenizer([raw_input_str] ,return_tensors=a_ ,add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ ,a_ )
    def test_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96_103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1_024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1_024)
        assert batch.attention_mask.shape == (2, 1_024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
def _snake_case ( self ) -> int:
# fmt: off
_UpperCAmelCase : List[Any] = {"""input_ids""": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ ,model_name="""google/bigbird-pegasus-large-arxiv""" ,revision="""ba85d0851d708441f91440d509690f1ab6353415""" ,)
@require_sentencepiece
@require_tokenizers
class lowercase ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def _snake_case ( self ) -> Optional[int]:
_UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
_UpperCAmelCase : Dict = (
"""Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
""" <pad> <pad> <pad>"""
)
_UpperCAmelCase : Tuple = rust_tokenizer([raw_input_str] ,return_tensors=a_ ,add_special_tokens=a_ ).input_ids[0]
_UpperCAmelCase : Optional[int] = py_tokenizer([raw_input_str] ,return_tensors=a_ ,add_special_tokens=a_ ).input_ids[0]
self.assertListEqual(a_ ,a_ )
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1_000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4_096)
        assert batch.attention_mask.shape == (2, 4_096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
def _snake_case ( self ) -> List[str]:
_UpperCAmelCase : List[str] = (
"""This is an example string that is used to test the original TF implementation against the HF"""
""" implementation"""
)
_UpperCAmelCase : List[Any] = self._large_tokenizer(a_ ).input_ids
self.assertListEqual(
a_ ,[182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] ,)
| 358 |
'''simple docstring'''
import unittest
from knapsack import greedy_knapsack as kp
class GreedyKnapsackTestCase(unittest.TestCase):
    def test_sorted(self):
        """calc_profit returns the expected maximum profit for valid inputs."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )
if __name__ == "__main__":
unittest.main()
| 349 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Greedy longest-match-first segmentation of `token` against the vocab."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
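# Illustrative behaviour of the greedy longest-match-first tokenizer above,
# with a hypothetical toy vocabulary (not the real CPM-Ant vocab):
#
#   wp = WordpieceTokenizer(vocab={"un", "happy", "unhappy"}, unk_token="<unk>")
#   wp.tokenize("unhappy")   # -> ["unhappy"]           (longest match wins over "un")
#   wp.tokenize("unhappyz")  # -> ["unhappy", "<unk>"]  (unmatched tail falls back to <unk>)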
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba pre-segmentation followed by wordpiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation first, then wordpiece on each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
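# Minimal usage sketch (assumes jieba is installed; "path/to/vocab.txt" is a
# placeholder for a local copy of the CPM-Ant vocabulary, not a shipped file):
#
#   tokenizer = CpmAntTokenizer("path/to/vocab.txt")
#   tokens = tokenizer._tokenize("今天天气真好")               # jieba cut + wordpiece
#   ids = [tokenizer._convert_token_to_id(t) for t in tokens]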
| 277 |
def harmonic_series(n_term: str) -> list:
    """Builds the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
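# Example: harmonic_series("5") -> ["1", "1/2", "1/3", "1/4", "1/5"], the first
# five terms of 1 + 1/2 + 1/3 + ... rendered as strings.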
| 26 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
self.assertListEqual([1, 1, 1] , snake_case )
| 364 | import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
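# Quick numerical check of the function above (a sketch, not library code):
def _sinusoidal_embedding_demo():
    timesteps = jnp.array([0, 1], dtype=jnp.float32)
    emb = get_sinusoidal_embeddings(timesteps, embedding_dim=4)
    # emb has shape (2, 4), laid out as [sin, sin, cos, cos] per row;
    # row 0 is [0, 0, 1, 1] because sin(0) = 0 and cos(0) = 1.
    return emb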
| 50 | 0 |
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Returns the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """Returns True if `number`'s digit-square chain ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Counts how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
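# Worked example of the digit-square chains counted above:
#   44 -> 4^2 + 4^2 = 32 -> 13 -> 10 -> 1       (chain ends at 1)
#   85 -> 8^2 + 5^2 = 89 -> 145 -> ... -> 89    (chain loops back to 89)
# Every starting number lands on one of these two endpoints, which is why the
# two seed entries CHAINS[0] and CHAINS[57] are sufficient.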
| 165 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
A_ : List[str] = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = ["YolosFeatureExtractor"]
A_ : Optional[int] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = [
"YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
"YolosForObjectDetection",
"YolosModel",
"YolosPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 | 1 |
"""simple docstring"""
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
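# Hypothetical usage sketch (the agent runtime that normally drives tools is
# not shown in this file; the call shape follows the class above):
#
#   tool = TextClassificationTool()
#   tool.setup()                                   # loads bart-large-mnli
#   inputs = tool.encode("This movie was great", ["positive", "negative"])
#   outputs = tool.model(**inputs)
#   tool.decode(outputs)                           # -> "positive"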
| 356 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 212 | 0 |
def naive_cut_rod_recursive(n: int, prices: list):
    """Exhaustive recursion: best revenue for a rod of length n (exponential time)."""
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revue


def top_down_cut_rod(n: int, prices: list):
    """Memoized (top-down dynamic programming) variant."""
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    """Iterative (bottom-up dynamic programming) variant, O(n^2) time."""
    _enforce_args(n, prices)

    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])

        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
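# Example beyond main(): with prices [1, 5, 8, 9] for rod lengths 1..4,
# bottom_up_cut_rod(4, [1, 5, 8, 9]) returns 10 -- two pieces of length 2
# (5 + 5) beat selling the uncut length-4 rod for 9.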
| 26 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
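# Minimal usage sketch (the checkpoint name is illustrative; any repo whose
# config type appears in FEATURE_EXTRACTOR_MAPPING_NAMES resolves the same way):
#
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   type(extractor).__name__   # "Wav2Vec2FeatureExtractor", picked via the mapping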
| 26 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
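# Usage sketch: attribute_map lets the generic names resolve to the
# DETR-specific ones (illustration only, not part of the library file):
#
#   config = ConditionalDetrConfig()
#   config.hidden_size           # 256, aliased to config.d_model
#   config.num_attention_heads   # 8, aliased to config.encoder_attention_heads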
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 341 | 0 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 137 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 137 | 1 |
def check_bouncy(n: int) -> bool:
    """A number is bouncy if its digits are neither monotonically increasing nor decreasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Returns the least number at which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(99)}""")
| 44 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 44 | 1 |
'''simple docstring'''
def jaccard_similarity(set_a, set_b, alternative_union=False):
    '''simple docstring'''
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)
    return None
if __name__ == "__main__":
    set_a = {'a', 'b', 'c', 'd', 'e'}
    set_b = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
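    # For the sample sets above, the intersection is {'c', 'd', 'e'} (3 elements) and
    # the union has 8 elements, so this prints 3 / 8 = 0.375.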
| 3 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
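# A minimal usage sketch (assumes a running Ray cluster; the checkpoint name and
# worker count below are illustrative, not prescribed by this module):
#
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()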
| 349 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def mark(key):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys):
    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """simple docstring"""

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)
        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
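# A minimal usage sketch (the Menu class is hypothetical; key names assume the
# KEYMAP dict from the companion keymap module):
#
#   class Menu(metaclass=KeyHandler):        # or: Menu = register(Menu)
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           ...
#
#   menu = Menu()
#   menu.handle_input()  # reads one keypress and dispatches to the marked handler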
| 361 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct_filename, fail_filename=None):
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
UpperCAmelCase: List[Any] = argparse.ArgumentParser()
parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
UpperCAmelCase: Any = parser.parse_args()
main(args.correct_filename, args.fail_filename)
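# Per the split(";") above, each line of the "correct" file carries four fields,
# file;class_name;test_name;correct_line, e.g. (illustrative values):
#   tests/models/x/test_modeling_x.py;XModelTest;test_inference;expected_slice = torch.tensor([...])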
| 336 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
            if not is_used:
                unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
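# Example invocation (file and paths are illustrative):
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base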
| 63 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16,
        decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02,
        decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True,
        layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
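# Usage sketch: TrOCRConfig() builds the default configuration above; any field can be
# overridden at construction time, e.g. TrOCRConfig(decoder_layers=6, d_model=512).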
| 63 | 1 |
def count_divisors(n):
    """simple docstring"""
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors


def solution():
    """simple docstring"""
    t_num = 1
    i = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num) > 500:
            break
    return t_num
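# For reference, this is Project Euler problem 12: the first triangle number with
# more than 500 divisors is 76576500, which is the value solution() returns.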
if __name__ == "__main__":
print(solution()) | 329 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_biogpt': ['BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BioGptConfig'],
'tokenization_biogpt': ['BioGptTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_biogpt'] = [
'BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BioGptForCausalLM',
'BioGptForTokenClassification',
'BioGptForSequenceClassification',
'BioGptModel',
'BioGptPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 329 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    """simple docstring"""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    """simple docstring"""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    """simple docstring"""

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
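# A minimal usage sketch (shapes are illustrative; Flax convolutions expect NHWC layout,
# and Flax modules are initialized lazily from sample inputs):
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   params = block.init(
#       jax.random.PRNGKey(0), jnp.ones((1, 16, 16, 32)), jnp.ones((1, 128))
#   )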
| 267 |
'''simple docstring'''
def find_minimum_change(denominations, value):
    """simple docstring"""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answer" array

    return answer
# Driver Code
if __name__ == "__main__":
UpperCAmelCase : Dict = []
UpperCAmelCase : List[str] = '0'
if (
input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
== "y"
):
UpperCAmelCase : List[str] = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(f"""Denomination {i}: """).strip()))
UpperCAmelCase : str = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCAmelCase : int = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
UpperCAmelCase : Any = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(f"""Following is minimal change for {value}: """)
UpperCAmelCase : Any = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
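# Note: this greedy approach is optimal for canonical coin systems such as the Indian
# denominations above, but not for arbitrary sets: with denominations [1, 3, 4] and
# value 6 it returns [4, 1, 1] (3 coins) instead of the minimal [3, 3] (2 coins).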
| 267 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + '/test_data/fsmt/fsmt_val_data.json'
with io.open(filename, 'r', encoding='utf-8') as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 258 | from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 258 | 1 |
"""simple docstring"""
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines, direction):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH) | 86 |
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = defaultdict(__magic_name__ )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(__magic_name__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() )
def lowercase ( __magic_name__=False ):
'''simple docstring'''
with open(__magic_name__ , encoding="utf-8" ) as f:
UpperCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase : Any = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : str = api_doc[model_idx]["sections"]
UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : int = modality_doc["sections"]
UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Any = model_doc
UpperCAmelCase : Any = api_doc
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
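# The traversal above assumes the toctree shape used by the docs, roughly (illustrative):
#   - title: API
#     sections:
#       - title: Models
#         sections:
#           - title: Text models
#             sections:
#               - local: model_doc/bert
#                 title: BERT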
| 311 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_convnext'''] = ['''ConvNextFeatureExtractor''']
    _import_structure['''image_processing_convnext'''] = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convnext'''] = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convnext'''] = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
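    # _LazyModule swaps itself in for this module in sys.modules, so the heavy
    # torch/TF imports above only run when an exported name is first accessed.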
| 248 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = (
    subprocess.check_output(F'git diff --diff-filter=d --name-only {fork_point_sha}'.split()).decode('''utf-8''').split()
)
joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(RF'^({joined_dirs}).*?\.py$')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
| 248 | 1 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    """simple docstring"""

    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor((batch_size, 3) + sizes, generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
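# A concrete test case built on this mixin looks roughly like (illustrative names):
#   class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#       block_class = UpBlock2D
#       block_type = "up"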
| 123 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 275 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    '''simple docstring'''
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    '''simple docstring'''
    for i in range(1, 6):
        if f'layer_{i}.' in name:
            name = name.replace(f'layer_{i}.', f'encoder.layer.{i - 1}.')

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f'.{i}.{j}.' in name:
                name = name.replace(f'.{i}.{j}.', f'.{i}.layer.{j}.')

    for i in range(2, 6):
        for j in range(0, 4):
            if f'.{i}.{j}.' in name:
                name = name.replace(f'.{i}.{j}.', f'.{i}.')
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f'.global_rep.{i}.weight' in name:
            name = name.replace(f'.global_rep.{i}.weight', ".layernorm.weight")
        if f'.global_rep.{i}.bias' in name:
            name = name.replace(f'.global_rep.{i}.bias', ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    '''simple docstring'''
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f'{model_prefix}encoder.layer.{layer_num}')
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}')

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f'Unknown mobilevit_name: {mobilevit_name}')

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
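
# Example invocation as a sketch only — the script filename and local paths below
# are hypothetical placeholders, not values from this file:
#   python convert_mobilevit_original_to_pytorch.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small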
| 345 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
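
# Minimal sketch of what the two descriptors above encode (assumes the
# pytorch-quantization package is installed): activations are quantized per
# tensor, weights per output channel (axis 0) unless --quant-per-tensor is set.
# This function is illustration only; calling it would override the defaults.
def _demo_quant_descriptors():
    per_tensor_act = QuantDescriptor(num_bits=8, calib_method="histogram")
    per_channel_wgt = QuantDescriptor(num_bits=8, axis=(0,))
    quant_nn.QuantLinear.set_default_quant_desc_input(per_tensor_act)
    quant_nn.QuantLinear.set_default_quant_desc_weight(per_channel_wgt)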
def configure_model(model, args, calib=False, eval=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
            module.enable_quant()
            module.disable_calib()
        else:
            module.enable()
    model.cuda()
    print_quant_summary(model)
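
# Typical calibration flow with the helpers above (a sketch only; `model`,
# `args` and `calib_dataloader` are assumed to exist in the caller):
#
#     configure_model(model, args, calib=True)
#     enable_calibration(model)
#     for batch in calib_dataloader:      # a few forward passes to collect ranges
#         model(**batch)
#     finish_calibration(model, args)     # load amax values, re-enable quantization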
def fuse_qkv(model, args):
    """Use the same scale factor for the query, key and value projections, so the
    result matches an implementation that computes QKV with a single fused GEMM."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval by adjusting the amax of the
    following input quantizer."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned
    the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print summary of all quantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of mod's quantizer to value v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
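
# Usage sketch: the `names` argument is a list of regexes matched against module
# names. For example, disabling every quantizer whose name mentions "pooler"
# (a hypothetical pattern, shown for illustration only):
#
#     set_quantizer_by_name(model, [r"pooler"], _disabled=True)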
| 240 | 0 |
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Checks whether it is possible to add next_ver to the path."""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper that fills the path from index curr_ind onwards."""
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Initializes the path and starts the backtracking search."""
    # Initialize path with -1, indicating that we have not visited them yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
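

# Quick usage sketch: the 5-node adjacency matrix below (an illustrative
# example) contains the Hamiltonian cycle [0, 1, 2, 4, 3, 0].
def _example_hamilton_cycle() -> None:
    graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(graph))  # [0, 1, 2, 4, 3, 0]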
| 353 |
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
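
# Behavior sketch: these placeholders exist so imports succeed even when the
# optional "speech" dependencies are missing; instantiating one raises an
# ImportError explaining which extra to install, e.g. (illustration only):
#
#     ASTFeatureExtractor()  # -> ImportError unless the speech backend is installed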
| 134 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, optionally rounded to digit_amount digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 62 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)
@cached_property
    def ta_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # collect all single-byte tokens that decode cleanly
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = tokenizer(['''hi</s>''', '''I went to the gym</s>''', '''</s>'''] )
__lowercase = tokenizer(['''hi''', '''I went to the gym''', ''''''] )
self.assertListEqual(batch_with_eos_added['''input_ids'''] , batch_without_eos_added['''input_ids'''] )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = '''Unicode €.'''
__lowercase = tokenizer(lowerCAmelCase__ )
__lowercase = [88, 1_13, 1_08, 1_02, 1_14, 1_03, 1_04, 35, 2_29, 1_33, 1_75, 49, 1]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase__ )
# decoding
__lowercase = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''Unicode €.</s>''' )
__lowercase = tokenizer('''e è é ê ë''' )
__lowercase = [1_04, 35, 1_98, 1_71, 35, 1_98, 1_72, 35, 1_98, 1_73, 35, 1_98, 1_74, 1]
self.assertEqual(encoded['''input_ids'''] , lowerCAmelCase__ )
# decoding
__lowercase = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , '''e è é ê ë</s>''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''e è é ê ë</s>''' )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
# fmt: off
__lowercase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 1, 0]
# fmt: on
__lowercase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
if FRAMEWORK != "jax":
__lowercase = list(batch.input_ids.numpy()[0] )
else:
__lowercase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
__lowercase = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , lowerCAmelCase__ )
self.assertIn('''attention_mask''' , lowerCAmelCase__ )
self.assertNotIn('''decoder_input_ids''' , lowerCAmelCase__ )
self.assertNotIn('''decoder_attention_mask''' , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = [
'''Summary of the text.''',
'''Another summary.''',
]
__lowercase = tokenizer(
text_target=lowerCAmelCase__ , max_length=32 , padding='''max_length''' , truncation=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.ta_base_tokenizer
__lowercase = ['''A long paragraph for summarization. </s>''']
__lowercase = ['''Summary of the text. </s>''']
# fmt: off
__lowercase = [68, 35, 1_11, 1_14, 1_13, 1_06, 35, 1_15, 1_00, 1_17, 1_00, 1_06, 1_17, 1_00, 1_15, 1_07, 35, 1_05, 1_14, 1_17, 35, 1_18, 1_20, 1_12, 1_12, 1_00, 1_17, 1_08, 1_25, 1_00, 1_19, 1_08, 1_14, 1_13, 49, 35, 1]
__lowercase = [86, 1_20, 1_12, 1_12, 1_00, 1_17, 1_24, 35, 1_14, 1_05, 35, 1_19, 1_07, 1_04, 35, 1_19, 1_04, 1_23, 1_19, 49, 35, 1]
# fmt: on
__lowercase = tokenizer(lowerCAmelCase__ , text_target=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , batch['''input_ids'''][0] )
self.assertEqual(lowerCAmelCase__ , batch['''labels'''][0] )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
__lowercase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__lowercase = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
shutil.rmtree(lowerCAmelCase__ )
__lowercase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
tokenizer.add_tokens(['''bim''', '''bambam'''] )
__lowercase = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__lowercase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer.__class__.from_pretrained(lowerCAmelCase__ )
__lowercase = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowercase = tokenizer.__class__.from_pretrained(lowerCAmelCase__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
__lowercase = json.load(lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
__lowercase = json.load(lowerCAmelCase__ )
__lowercase = [F"<extra_id_{i}>" for i in range(1_25 )]
__lowercase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
__lowercase = added_tokens_extra_ids + [
'''an_additional_special_token'''
]
with open(os.path.join(lowerCAmelCase__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowercase = tokenizer_class.from_pretrained(
lowerCAmelCase__ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowercase = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=lowerCAmelCase__ )]
__lowercase = tokenizer_class.from_pretrained(
lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_class.from_pretrained(lowerCAmelCase__ )
self.assertTrue(tokenizer.decode([2_55] ) == '''''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_tokenizers(fast=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ )
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = ['''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''x''', '''t''', '''</s>''']
__lowercase = tokenizer.convert_tokens_to_string(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
__lowercase = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__lowercase = 0
__lowercase = tokenizer.convert_ids_to_tokens(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
for attr in attributes_list:
setattr(lowerCAmelCase__ , attr + '''_id''' , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + '''_id''' ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , attr + '''_id''' , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(getattr(lowerCAmelCase__ , attr + '''_id''' ) , lowerCAmelCase__ )
setattr(lowerCAmelCase__ , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(lowerCAmelCase__ , '''additional_special_tokens_ids''' ) , [] )
setattr(lowerCAmelCase__ , '''additional_special_tokens_ids''' , [token_id_to_test_setters] )
self.assertListEqual(getattr(lowerCAmelCase__ , '''additional_special_tokens''' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 210 | 0 |
'''Directed and undirected (weighted) graphs stored as adjacency lists.'''
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1 the count
    # will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
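

# Usage sketch (illustrative edges, not from the original file):
def _demo_directed_graph() -> None:
    g = DirectedGraph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    g.add_pair(2, 0)  # introduces the cycle 0 -> 1 -> 2 -> 0
    print(g.dfs(0, 2))    # [0, 1, 2]
    print(g.has_cycle())  # True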
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges
    # adding the weight is optional
    # handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the count of nodes you want; if you leave it or pass -1 the count
    # will be random from 10 to 10000
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
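

# Usage sketch for the undirected variant (illustrative edges):
def _demo_undirected_graph() -> None:
    g = Graph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    print(g.degree(1))  # 2, since edges are stored in both directions
    print(g.bfs(0))     # [0, 1, 2]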
| 136 |
'''Borůvka's algorithm for finding a minimum spanning tree.'''
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph
            m_edges - the list of edges
            m_component - maps each node to the index of its component
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the representative (root) of the component containing u_node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Re-labels every node with the root of its component."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Attaches the smaller component to the larger one, merging them."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the MST."""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # record the cheapest outgoing edge for every component
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # add each recorded edge to the MST and merge its components
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")
def __lowerCamelCase ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136 | 1 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = PLBartTokenizer(__UpperCAmelCase , language_codes="base" , keep_accents=__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowerCAmelCase__ = tokenizer.vocab_size
lowerCAmelCase__ = [tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) for x in range(end - 4 , __UpperCAmelCase )]
self.assertListEqual(__UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "<mask>"] )
lowerCAmelCase__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowerCAmelCase__ = tokenizer(__UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) , __UpperCAmelCase , )
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = PLBartTokenizer(__UpperCAmelCase , language_codes="multi" , keep_accents=__UpperCAmelCase )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__UpperCAmelCase )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
lowerCAmelCase__ = tokenizer.vocab_size
lowerCAmelCase__ = [tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) for x in range(end - 7 , __UpperCAmelCase )]
self.assertListEqual(
__UpperCAmelCase , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
lowerCAmelCase__ = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
lowerCAmelCase__ = tokenizer(__UpperCAmelCase ).input_ids
self.assertEqual(
tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase ) , __UpperCAmelCase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134, 5452, 33460, 33441, 33463, 33465, 33463, 33449, 988, 20, 33456, 19, 33456,
        771, 39, 4258, 889, 3318, 33441, 33463, 33465, 33463, 33449, 2471, 2, PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 50001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 50002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 50003 )
def UpperCAmelCase ( self )-> Dict:
'''simple docstring'''
lowerCAmelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids )
lowerCAmelCase__ = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
lowerCAmelCase__ = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )
lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase )
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase )
def UpperCAmelCase ( self )-> Any:
'''simple docstring'''
lowerCAmelCase__ = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
self.assertIsInstance(src_text[0] , __UpperCAmelCase )
lowerCAmelCase__ = 10
lowerCAmelCase__ = self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __UpperCAmelCase )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
def UpperCAmelCase ( self )-> List[Any]:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [50004, 50001] )
def UpperCAmelCase ( self )-> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCAmelCase )
lowerCAmelCase__ = PLBartTokenizer.from_pretrained(__UpperCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase )
@require_torch
def UpperCAmelCase ( self )-> str:
'''simple docstring'''
lowerCAmelCase__ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors="pt" )
lowerCAmelCase__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __UpperCAmelCase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
lowerCAmelCase__ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
lowerCAmelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def UpperCAmelCase ( self )-> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors="pt" )
lowerCAmelCase__ = self.tokenizer(
text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=10 , return_tensors="pt" )
lowerCAmelCase__ = targets["input_ids"]
lowerCAmelCase__ = shift_tokens_right(__UpperCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase ( self )-> int:
'''simple docstring'''
lowerCAmelCase__ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(__UpperCAmelCase ) , {
# A, test, EOS, en_XX
"input_ids": [[150, 242, 2, 50003]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 50001,
} , )
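
# Usage sketch (assumes network access to the Hugging Face Hub): preparing
# translation inputs with explicit source/target languages, mirroring the
# integration test above.
#
#     tokenizer = PLBartTokenizer.from_pretrained(
#         "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
#     )
#     inputs = tokenizer("def f(a):NEW_LINE_INDENTreturn a", return_tensors="pt")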
| 340 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    """
    Return the number of perimeters L <= limit for which exactly one
    integer-sided right triangle exists, generated via Euclid's formula:
    a primitive triple with parameters m > n has perimeter 2 * m * (m + n).
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # count this primitive triple and all its multiples up to the limit
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
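

# Sanity-check sketch: with limit=12 only the (3, 4, 5) triangle fits, so
# exactly one wire length (L = 12) has a unique solution.
#
#     >>> solution(12)
#     1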
if __name__ == "__main__":
print(F"{solution() = }")
| 340 | 1 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act='silu',
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileViT does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileViT does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
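# Hedged smoke-test sketch reusing the tiny tester configuration above
# (CPU-only; `parent=None` is acceptable here because no assertions are made):
#
#   tester = MobileViTModelTester(parent=None)
#   config = tester.get_config()
#   model = MobileViTModel(config).eval()
#   pixel_values = torch.randn(1, tester.num_channels, tester.image_size, tester.image_size)
#   with torch.no_grad():
#       out = model(pixel_values)
#   print(out.last_hidden_state.shape)  # (1, 640, 1, 1) for a 32x32 input with output_stride 32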
| 145 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod using O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
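
# Hedged sanity check: the recursive helper should agree with Python's built-in
# three-argument pow(), which also runs in O(log n); pow(b, -1, p) (Python 3.8+)
# computes the same modular inverse directly via Fermat's little theorem.
assert binary_exponentiation(b, p - 2, p) == pow(b, p - 2, p)
assert pow(b, -1, p) == binary_exponentiation(b, p - 2, p)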
| 145 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
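# Hedged usage sketch: with the lazy module installed in sys.modules, importing
# the package stays cheap; the torch-backed symbols are only materialized on
# first attribute access (the import path assumes this file is the package
# __init__ for `transformers.models.informer`):
#
#   from transformers.models.informer import InformerConfig  # no torch import yet
#   config = InformerConfig(prediction_length=24)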
| 27 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowercase : Tuple = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
__lowercase : List[str] = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
__lowercase : Any = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
def __UpperCAmelCase ( self , __a , __a , __a=None , __a=False , __a=False , __a=False , ):
'''simple docstring'''
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
__a : Tuple = np.array([re.sub(__a , '' , __a ) for x in predictions] )
__a : List[Any] = np.array([re.sub(__a , '' , __a ) for x in references] )
else:
__a : int = np.asarray(__a )
__a : str = np.asarray(__a )
if ignore_case:
__a : Dict = np.char.lower(__a )
__a : List[str] = np.char.lower(__a )
if ignore_punctuation:
__a : Dict = string.punctuation.maketrans('' , '' , string.punctuation )
__a : Tuple = np.char.translate(__a , table=__a )
__a : Dict = np.char.translate(__a , table=__a )
if ignore_numbers:
__a : Optional[int] = string.digits.maketrans('' , '' , string.digits )
__a : Tuple = np.char.translate(__a , table=__a )
__a : Optional[int] = np.char.translate(__a , table=__a )
__a : Any = predictions == references
return {"exact_match": np.mean(__a ) * 100}
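# Hedged standalone sketch (bypassing `datasets.load_metric`): with no options
# set, the metric reduces to an elementwise equality over numpy string arrays.
#
#   import numpy as np
#   preds = np.asarray(["cat?", "theater", "yelling", "agent"])
#   refs = np.asarray(["the cat", "theater", "YELLING", "agent007"])
#   print(np.mean(preds == refs) * 100)  # 25.0, matching the first docstring example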
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
__snake_case : Dict = TypeVar('T')
class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # the value stored in this node
        self.next: Node[T] | None = None  # reference to the node below

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """LIFO stack implemented on top of a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
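
    # Hedged usage sketch for the stack above; `LinkedStack` is the class name
    # reconstructed for this snippet, not confirmed by the source.
    stack: LinkedStack[int] = LinkedStack()
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert str(stack) == "3->2->1"
    assert stack.pop() == 3
    assert stack.peek() == 2
    assert len(stack) == 2 and not stack.is_empty()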
| 136 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
    warnings.warn(
        'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
    )
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
'7B': 1_1008,
'13B': 1_3824,
'30B': 1_7920,
'65B': 2_2016,
'70B': 2_8672,
}
NUM_SHARDS = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
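
# Worked example for the sizing rule above, assuming the 7B defaults
# (n = 4096, ffn_dim_multiplier = 1, multiple_of = 256):
#   int(8 * 4096 / 3) = 10922, and rounding up to the next multiple of 256 gives
#   compute_intermediate_size(4096) == 11008, matching the "7B" entry in the
#   intermediate-size table above.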
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)
def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
"""simple docstring"""
os.makedirs(__snake_case, exist_ok=__snake_case )
A__ : List[Any] =os.path.join(__snake_case, """tmp""" )
os.makedirs(__snake_case, exist_ok=__snake_case )
A__ : Dict =read_json(os.path.join(__snake_case, """params.json""" ) )
A__ : Dict =NUM_SHARDS[model_size]
A__ : List[str] =params["""n_layers"""]
A__ : int =params["""n_heads"""]
A__ : str =n_heads // num_shards
A__ : Tuple =params["""dim"""]
A__ : Union[str, Any] =dim // n_heads
A__ : str =1_00_00.0
A__ : Any =1.0 / (base ** (torch.arange(0, __snake_case, 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
A__ : Optional[Any] =params["""n_kv_heads"""] # for GQA / MQA
A__ : int =n_heads_per_shard // num_key_value_heads
A__ : int =dim // num_key_value_heads
else: # compatibility with other checkpoints
A__ : List[Any] =n_heads
A__ : List[str] =n_heads_per_shard
A__ : Dict =dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f"Fetching all parameters from the checkpoint at {input_base_path}." )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
A__ : List[str] =torch.load(os.path.join(__snake_case, """consolidated.00.pth""" ), map_location="""cpu""" )
else:
# Sharded
A__ : Optional[Any] =[
torch.load(os.path.join(__snake_case, f"consolidated.{i:02d}.pth" ), map_location="""cpu""" )
for i in range(__snake_case )
]
A__ : Optional[Any] =0
A__ : str ={"""weight_map""": {}}
for layer_i in range(__snake_case ):
A__ : Dict =f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ : Dict ={
f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wq.weight"] ),
f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
loaded[f"layers.{layer_i}.attention.wk.weight"] ),
f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
A__ : Any ={
f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
A__ : Optional[Any] =permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case ) )
A__ : int =permute(
torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case ), __snake_case, __snake_case, __snake_case, )
A__ : int =torch.cat(
[
loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
__snake_case, __snake_case, __snake_case )
for i in range(__snake_case )
], dim=0, ).reshape(__snake_case, __snake_case )
A__ : List[str] =torch.cat(
[loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(__snake_case )], dim=1 )
A__ : Optional[int] =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(__snake_case )], dim=0 )
A__ : str =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(__snake_case )], dim=1 )
A__ : List[str] =torch.cat(
[loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(__snake_case )], dim=0 )
A__ : List[Any] =inv_freq
for k, v in state_dict.items():
A__ : Optional[Any] =filename
param_count += v.numel()
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
A__ : Tuple =f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
if model_size == "7B":
# Unsharded
A__ : Tuple ={
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
A__ : Any ={
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__snake_case )], dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__snake_case )], dim=0 ),
}
for k, v in state_dict.items():
A__ : int =filename
param_count += v.numel()
torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) )
# Write configs
A__ : Union[str, Any] ={"""total_size""": param_count * 2}
write_json(__snake_case, os.path.join(__snake_case, """pytorch_model.bin.index.json""" ) )
A__ : Optional[Any] =params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
A__ : List[Any] =params["""multiple_of"""] if """multiple_of""" in params else 256
A__ : int =LlamaConfig(
hidden_size=__snake_case, intermediate_size=compute_intermediate_size(__snake_case, __snake_case, __snake_case ), num_attention_heads=params["""n_heads"""], num_hidden_layers=params["""n_layers"""], rms_norm_eps=params["""norm_eps"""], num_key_value_heads=__snake_case, )
config.save_pretrained(__snake_case )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
    A__ : List[Any] =LlamaForCausalLM.from_pretrained(__snake_case, torch_dtype=torch.float16, low_cpu_mem_usage=__snake_case )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
model.save_pretrained(__snake_case, safe_serialization=__snake_case )
shutil.rmtree(__snake_case )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
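
# Hedged usage sketch (the paths below are placeholders, not from this script):
#
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir /path/to/output/hf/model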
| 136 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
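# Hedged usage sketch: constructing the shim should behave like
# OwlViTImageProcessor while emitting the deprecation warning.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       extractor = OwlViTFeatureExtractor()
#   assert any(issubclass(w.category, FutureWarning) for w in caught)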
| 1 | '''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self,
        distr_args,
        loc: Optional[torch.Tensor] = None,
        scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value in the support of the distribution, usable as a padding value."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping input features to distribution parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Convert raw parameters to the right shape and domain; implemented by subclasses."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive orthant with the square-plus operation."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent class method: we cannot scale with the affine
    # transformation since the negative binomial returns integers, so the
    # parameters are scaled instead.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
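
# Hedged wiring sketch for the classes above (sizes and values are arbitrary):
#
#   output = StudentTOutput(dim=1)
#   proj = output.get_parameter_projection(in_features=8)
#   features = torch.randn(4, 8)
#   df, loc, scale = proj(features)                  # each of shape (4,)
#   distr = output.distribution((df, loc, scale), loc=torch.zeros(4), scale=torch.ones(4))
#   sample = distr.sample()                          # shape (4,)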
| 1 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', r' \1', token)
        token = re.sub("(')", r' \1 ', token)
        token = re.sub(r'\s{2,}', ' ', token)
        if '\n' in token:
            token = token.replace('\n', ' __newln__')

        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return ' '.join(words)
    def _tokenize(self, text: str) -> List[str]:
        """Split a string into tokens using BPE."""
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
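
# Hedged usage sketch for the `get_pairs` helper above (no vocab files needed):
#
#   word = ("h", "e", "l", "l", "o</w>")
#   print(get_pairs(word))
#   # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}  (set order may vary)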
| 339 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"
    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # NOTE: "use_memorry_efficient_attention" (with the misspelling) is the
        # legacy kwarg key accepted for backward compatibility.
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
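
# Hedged usage sketch: `rope_scaling` is validated eagerly at construction time.
#
#   config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#   print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}
#   # An unknown scaling type raises ValueError:
#   # OpenLlamaConfig(rope_scaling={"type": "bogus", "factor": 2.0})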
| 339 | 1 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session')
def dataset():
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string')),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'])),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string'),
                    'answer_start': datasets.Value('int32'),
                }
            ),
            'id': datasets.Value('int64'),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n)),
        },
        features=features,
    )
    return dataset
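
# Hedged usage sketch: pytest injects the session fixtures above by name, so a
# test simply declares them as parameters (the test name is illustrative only):
#
#   def test_dataset_fixture(dataset):
#       assert len(dataset) == 10
#       assert set(dataset.column_names) == {"tokens", "labels", "answers", "id"}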
@pytest.fixture(scope='session')
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp('data') / 'file.arrow')
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Any ) -> List[str]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt'
SCREAMING_SNAKE_CASE_ = FILE_CONTENT
with open(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase )
return filename
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> List[str]:
    import bz2
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
    with bz2.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] ) -> Any:
import gzip
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with gzip.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> int:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
        with lz4.frame.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] ) -> Any:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
        with py7zr.SevenZipFile(__UpperCAmelCase , 'w' ) as archive:
archive.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : str ) -> str:
import tarfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(__UpperCAmelCase , 'w' ) as f:
f.add(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> List[Any]:
import lzma
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with lzma.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Dict ) -> str:
import zipfile
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict ) -> Any:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
SCREAMING_SNAKE_CASE_ = bytes(__UpperCAmelCase , 'utf-8' )
with zstd.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'file.xml'
SCREAMING_SNAKE_CASE_ = textwrap.dedent(
'\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' )
with open(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase )
return filename
DATA = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
DATA2 = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
DATA_DICT_OF_LISTS = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( ) -> Tuple:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = datasets.Dataset.from_dict(__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
    with contextlib.closing(sqlite3.connect(__UpperCAmelCase ) ) as con:
SCREAMING_SNAKE_CASE_ = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(__UpperCAmelCase , 'w' , newline='' ) as f:
SCREAMING_SNAKE_CASE_ = csv.DictWriter(__UpperCAmelCase , fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Tuple ) -> str:
    import bz2
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(__UpperCAmelCase , 'rb' ) as f:
SCREAMING_SNAKE_CASE_ = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(__UpperCAmelCase , 'wb' ) as f:
f.write(__UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(__UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : str ) -> Any:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) )
f.write(__UpperCAmelCase , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(__UpperCAmelCase , 'w' ) as f:
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
f.write(__UpperCAmelCase , arcname=os.path.join('main_dir' , os.path.basename(__UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : List[str] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
SCREAMING_SNAKE_CASE_ = pa.schema(
{
'col_1': pa.string(),
            'col_2': pa.int64(),
            'col_3': pa.float64(),
} )
with open(__UpperCAmelCase , 'wb' ) as f:
SCREAMING_SNAKE_CASE_ = pq.ParquetWriter(__UpperCAmelCase , schema=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(__UpperCAmelCase ) )] for k in DATA[0]} , schema=__UpperCAmelCase )
writer.write_table(__UpperCAmelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple ) -> Any:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def UpperCAmelCase_ ( __UpperCAmelCase : Union[str, Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
SCREAMING_SNAKE_CASE_ = {'data': DATA_DICT_OF_LISTS}
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
return path
@pytest.fixture(scope='session')
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.jsonl')
    with open(path, 'w') as f:
        for item in DATA:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset_312.jsonl')
    with open(path, 'w') as f:
        for item in DATA_312:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp('data') / 'dataset-str.jsonl')
    with open(path, 'w') as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + '\n')
    return path
@pytest.fixture(scope='session')
def text_gz_path(tmp_path_factory, text_path):
    import gzip
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt.gz')
    with open(text_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='session')
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip
    path = str(tmp_path_factory.mktemp('data') / 'dataset.jsonl.gz')
    with open(jsonl_path, 'rb') as orig_file:
        with gzip.open(path, 'wb') as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope='session')
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope='session')
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(zip_jsonl_path, arcname=os.path.join('nested', os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope='session')
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(jsonl_path, arcname=os.path.join('main_dir', os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join('main_dir', os.path.basename(jsonl2_path)))
    return path
@pytest.fixture(scope='session')
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope='session')
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(path, 'w') as f:
        f.add(tar_jsonl_path, arcname=os.path.join('nested', os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope='session')
def text_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def text2_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = str(tmp_path_factory.mktemp('data') / 'dataset2.txt')
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def abc_path(tmp_path_factory):
    data = ['0', '1', '2', '3']
    path = tmp_path_factory.mktemp('data') / 'dataset.abc'
    with open(path, 'w') as f:
        for item in data:
            f.write(item + '\n')
    return path
@pytest.fixture(scope='session')
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope='session')
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.join('main_dir', os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join('main_dir', os.path.basename(text2_path)))
    return path
@pytest.fixture(scope='session')
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.ext.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(text_path, arcname=os.path.basename('unsupported.ext'))
        f.write(text2_path, arcname=os.path.basename('unsupported_2.ext'))
    return path
@pytest.fixture(scope='session')
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'])
    path = str(tmp_path_factory.mktemp('data') / 'dataset_with_unicode_new_lines.txt')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(text)
    return path
@pytest.fixture(scope='session')
def image_file():
    return os.path.join('tests', 'features', 'data', 'test_image_rgb.jpg')
@pytest.fixture(scope='session')
def audio_file():
    return os.path.join('tests', 'features', 'data', 'test_audio_44100.wav')
@pytest.fixture(scope='session')
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp('data') / 'dataset.img.zip'
    with zipfile.ZipFile(path, 'w') as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace('.jpg', '2.jpg'))
    return path
@pytest.fixture(scope='session')
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp('data_dir')
    (data_dir / 'subdir').mkdir()
    with open(data_dir / 'subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / 'subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt', 'w') as f:
        f.write('bar\n' * 10)
    # hidden directory
    (data_dir / '.subdir').mkdir()
    with open(data_dir / '.subdir' / 'train.txt', 'w') as f:
        f.write('foo\n' * 10)
    with open(data_dir / '.subdir' / 'test.txt', 'w') as f:
        f.write('bar\n' * 10)
    return data_dir
| 225 |
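# Illustrative sketch (not part of the fixture file above): how pytest would
# consume one of these session-scoped fixtures. The test body is an assumption
# for illustration; pytest injects the fixture's return value by matching the
# parameter name against the fixture function's name.
import zipfile
def test_zip_csv_contains_two_files(zip_csv_path):
    with zipfile.ZipFile(zip_csv_path) as archive:
        names = archive.namelist()
    assert len(names) == 2
    assert all(name.endswith('.csv') for name in names)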
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
# A value less than 2 can cause an infinite loop in the algorithm.
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'num',
type=int,
help='The value to find a divisor of',
)
parser.add_argument(
'--attempts',
type=int,
default=3,
help='The number of attempts before giving up',
)
    args = parser.parse_args()
    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
        print(f'''{args.num} = {divisor} * {quotient}''')
| 225 | 1 |
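# Illustrative sanity check for pollard_rho as defined above; the composite is
# an arbitrary small example (187 = 11 * 17). Because the method is
# probabilistic, None is a possible outcome on hard inputs.
composite = 187
factor = pollard_rho(composite)
if factor is None:
    print(f"No nontrivial factor found for {composite}; try more attempts.")
else:
    print(f"{composite} = {factor} * {composite // factor}")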
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCAmelCase = get_tests_dir("fixtures")
_lowerCAmelCase = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
_lowerCAmelCase = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop('feature_extractor_type')
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            feature_extractor = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(feature_extractor.to_json_string())
            self.assertTrue('_processor_class' not in dict_as_saved)
            self.assertIsInstance(feature_extractor, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
def snake_case__ ( self : str ):
with self.assertRaisesRegex(
a__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def snake_case__ ( self : str ):
with self.assertRaisesRegex(
a__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ , revision='''aaaaaa''' )
def snake_case__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
a__ , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__magic_name__ = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def snake_case__ ( self : Dict ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a__ ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a__ ):
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ , trust_remote_code=a__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def snake_case__ ( self : int ):
try:
AutoConfig.register('''custom''' , a__ )
AutoFeatureExtractor.register(a__ , a__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a__ ):
AutoFeatureExtractor.register(a__ , a__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__magic_name__ = CustomFeatureExtractor.from_pretrained(a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a__ )
__magic_name__ = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case__ ( self : int ):
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Optional[int] = True
try:
AutoConfig.register('''custom''' , a__ )
AutoFeatureExtractor.register(a__ , a__ )
# If remote code is not set, the default is to use local
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__magic_name__ = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(a__ , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 360 |
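# Condensed sketch of the register-then-resolve pattern exercised by the tests
# above. MyConfig/MyFeatureExtractor are illustrative stand-ins for the custom
# classes the real tests import from their test modules.
from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin
class MyConfig(PretrainedConfig):
    model_type = 'my-model'
class MyFeatureExtractor(FeatureExtractionMixin):
    pass
AutoConfig.register('my-model', MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
# After registration, save_pretrained/from_pretrained round-trips resolve to
# MyFeatureExtractor just like any built-in feature extractor.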
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_lowerCAmelCase = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_lowerCAmelCase = 10
_lowerCAmelCase = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    '''simple docstring'''
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    '''simple docstring'''
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F'''Duplicate key {code_key}''')
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'base_index': el[0], 'repo_name': el[1], 'path': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, 'w') as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    '''simple docstring'''
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['content']) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data['repo_name'], data['path']), min_hash
def minhash_iter(dataset_iterator):
    '''simple docstring'''
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    '''simple docstring'''
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    '''simple docstring'''
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    '''simple docstring'''
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['base_index']]['content']
        for element2 in extremes:
            code2 = _shared_dataset[element2['base_index']]['content']
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2['copies'] += 1
                break
        else:
            element1['copies'] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    '''simple docstring'''
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    '''simple docstring'''
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x['base_index'] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['base_index']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['is_extreme'] = element['base_index'] in extreme_dict
            if element['is_extreme']:
                element['copies'] = extreme_dict[element['base_index']]['copies']
    print(F'''Original dataset size: {len(dataset)}''')
    print(F'''Number of duplicate clusters: {len(duplicate_clusters)}''')
    print(F'''Files in duplicate cluster: {len(duplicate_indices)}''')
    print(F'''Unique files in duplicate cluster: {len(extreme_dict)}''')
    print(F'''Filtered dataset size: {len(ds_filter)}''')
    return ds_filter, duplicate_clusters
| 98 | 0 |
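# Standalone illustration of the datasketch primitives the module above is
# built on; the two code strings are made-up examples.
from datasketch import MinHash
def minhash_of(text):
    m = MinHash(num_perm=256)
    for token in set(text.split()):
        m.update(token.encode())
    return m
m1 = minhash_of('def add(a, b): return a + b')
m2 = minhash_of('def add(x, y): return x + y')
print(round(m1.jaccard(m2), 2))  # estimated Jaccard similarity in [0, 1]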
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 136 |
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    '''simple docstring'''
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model, input, target, accelerator, do_backward=True):
    '''simple docstring'''
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    '''simple docstring'''
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2)
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'''
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 136 | 1 |
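# Minimal sketch of the gradient-accumulation pattern the script above tests:
# gradients are synchronized only on the final micro-batch of each window.
# The tiny linear model and random data below are illustrative only.
from contextlib import nullcontext
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
accelerator = Accelerator()
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
accumulation_steps = 2
for step, (x, y) in enumerate(dataloader):
    sync_step = (step + 1) % accumulation_steps == 0
    # no_sync suppresses the DDP all-reduce; it is a no-op on a single process.
    ctx = nullcontext() if sync_step else accelerator.no_sync(model)
    with ctx:
        loss = torch.nn.functional.mse_loss(model(x), y) / accumulation_steps
        accelerator.backward(loss)
    if sync_step:
        optimizer.step()
        optimizer.zero_grad()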
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 6 |
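# Generic shape of the lazy-import pattern used above, with made-up module and
# symbol names; real files additionally guard optional backends as shown above.
import sys
from typing import TYPE_CHECKING
_import_structure = {'submodule': ['PublicClass']}
if TYPE_CHECKING:
    from .submodule import PublicClass  # noqa: F401
else:
    from transformers.utils import _LazyModule
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)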
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def next_number(number: int) -> int:
    """simple docstring"""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends with 1
CHAINS[57] = False  # the chain starting at 58 ends with 89
def chain(number: int) -> bool:
    """simple docstring"""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 10000000) -> int:
    """simple docstring"""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 6 | 1 |
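# Concrete illustration of the square-digit chains computed above: every
# starting value eventually reaches 1 or 89. walk_chain is a helper added
# purely for illustration.
def walk_chain(n):
    seen = [n]
    while n not in (1, 89):
        n = sum(int(d) ** 2 for d in str(n))
        seen.append(n)
    return seen
print(walk_chain(44))  # [44, 32, 13, 10, 1]
print(walk_chain(85))  # [85, 89]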
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""PerceiverFeatureExtractor"""]
lowerCamelCase = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 166 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/reformer-crime-and-punishment': (
            'https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/reformer-crime-and-punishment': 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(
        self,
        vocab_file,
        eos_token='</s>',
        unk_token='<unk>',
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 166 | 1 |
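# Sketch of the raw sentencepiece calls the tokenizer above wraps; the model
# path is a placeholder for any trained SentencePiece .model file.
import sentencepiece as spm
sp = spm.SentencePieceProcessor()
sp.Load('spiece.model')  # placeholder path
pieces = sp.encode('Crime and Punishment', out_type=str)
print(pieces)             # subword pieces, e.g. ['▁Crime', '▁and', ...]
print(sp.decode(pieces))  # round-trips back to 'Crime and Punishment'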
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class a__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
        __lowerCAmelCase = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
__lowerCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__lowerCAmelCase = CLIPTextModel(_A )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = 2
__lowerCAmelCase = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , )
__lowerCAmelCase = floats_tensor(control_image.shape , rng=random.Random(_A ) ).to(_A )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowerCAmelCase = Image.fromarray(np.uint8(_A ) ).convert("RGB" ).resize((6_4, 6_4) )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class a__ ( snake_case__ , snake_case__ , unittest.TestCase ):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([] )  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
torch.manual_seed(0 )
        __lowerCAmelCase = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)
__lowerCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
__lowerCAmelCase = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
        controlneta.controlnet_down_blocks.apply(init_weights )
torch.manual_seed(0 )
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
__lowerCAmelCase = CLIPTextModel(_A )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = MultiControlNetModel([controlneta, controlneta] )
__lowerCAmelCase = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __SCREAMING_SNAKE_CASE( self , _A , _A=0 ):
"""simple docstring"""
if str(_A ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(_A )
else:
__lowerCAmelCase = torch.Generator(device=_A ).manual_seed(_A )
__lowerCAmelCase = 2
__lowerCAmelCase = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=_A , device=torch.device(_A ) , ),
]
__lowerCAmelCase = floats_tensor(control_image[0].shape , rng=random.Random(_A ) ).to(_A )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowerCAmelCase = Image.fromarray(np.uint8(_A ) ).convert("RGB" ).resize((6_4, 6_4) )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
__lowerCAmelCase = 10.0
__lowerCAmelCase = 4
__lowerCAmelCase = self.get_dummy_inputs(_A )
__lowerCAmelCase = steps
__lowerCAmelCase = scale
__lowerCAmelCase = pipe(**_A )[0]
__lowerCAmelCase = self.get_dummy_inputs(_A )
__lowerCAmelCase = steps
__lowerCAmelCase = scale
__lowerCAmelCase = pipe(**_A , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
__lowerCAmelCase = self.get_dummy_inputs(_A )
__lowerCAmelCase = steps
__lowerCAmelCase = scale
__lowerCAmelCase = pipe(**_A , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
__lowerCAmelCase = self.get_dummy_inputs(_A )
__lowerCAmelCase = steps
__lowerCAmelCase = scale
__lowerCAmelCase = pipe(**_A , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(_A )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
    def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
        __lowerCAmelCase = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=_A , controlnet=_A )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
__lowerCAmelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCAmelCase = "evil space-punk bird"
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((5_1_2, 5_1_2) )
__lowerCAmelCase = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((5_1_2, 5_1_2) )
__lowerCAmelCase = pipe(
_A , _A , control_image=_A , generator=_A , output_type="np" , num_inference_steps=5_0 , strength=0.6 , )
__lowerCAmelCase = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
__lowerCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2
| 366 |
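# For context: a typical way to produce the canny control image used by the
# slow test above. The thresholds and file names are illustrative, not values
# taken from the test.
import cv2
import numpy as np
from PIL import Image
image = np.array(Image.open('bird.png').convert('RGB'))  # placeholder input
edges = cv2.Canny(image, 100, 200)
# Stack to three channels so the pipeline sees an RGB control image.
control_image = Image.fromarray(np.stack([edges] * 3, axis=-1))
control_image.save('bird_canny.png')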
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
UpperCamelCase__ = """\
"""
UpperCamelCase__ = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
UpperCamelCase__ = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
    def _compute(self, input_texts, model_id, batch_size=16, add_start_token=True, device=None):
        """simple docstring"""
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})
        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length
        encodings = tokenizer(
            input_texts, add_special_tokens=False, padding=True, truncation=True, max_length=max_tokenized_len, return_tensors="pt", return_attention_mask=True, ).to(device)
        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]
        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")
        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]
            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1)
            labels = encoded_batch
            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1))
            ppls += perplexity_batch.tolist()
        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 102 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30 |
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    """simple docstring"""

    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    '''simple docstring'''
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    '''simple docstring'''
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    '''simple docstring'''
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    '''simple docstring'''
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    '''simple docstring'''
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    '''simple docstring'''
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
| 252 | 0 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # round the floating-point cube root before cubing: 27 ** (1 / 3) is not exactly 3.0
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
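    # Quick illustration (demo only) of the floating-point pitfall the round() above
    # guards against: the real cube root of 27 is not represented exactly.
    print(27 ** (1 / 3))  # ~3.0000000000000004 on most platforms
    print(perfect_cube(125))  # True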
| 37 |
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 37 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict, old, new):
    '''simple docstring'''
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    '''simple docstring'''
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
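# Hedged sketch (illustration only, hidden size 256 as in the loop above): a fused
# attention projection stacks the query, key and value weights row-wise, so equal
# row slices recover the three separate projections. Not used by the conversion itself.
def _demo_split_fused_in_proj():
    fused = torch.randn(3 * 256, 256)  # rows 0:256 -> q, 256:512 -> k, 512:768 -> v
    q_w, k_w, v_w = fused[:256, :], fused[256:512, :], fused[-256:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (256, 256)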
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    '''simple docstring'''
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 349 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
'''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''YolosForObjectDetection''',
'''YolosModel''',
'''YolosPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 149 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""mgp-str""": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.vocab)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        '''simple docstring'''
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
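# Hedged usage sketch (not part of the tokenizer, and sensitive to the installed
# transformers version): MGP-STR tokenizes character by character, so a toy vocab of
# single characters is enough to exercise the encode path.
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        demo_vocab_path = os.path.join(tmp_dir, "vocab.json")
        with open(demo_vocab_path, "w", encoding="utf-8") as demo_f:
            json.dump({"[GO]": 0, "[s]": 1, "a": 2, "b": 3}, demo_f)
        demo_tokenizer = MgpstrTokenizer(demo_vocab_path)
        demo_ids = demo_tokenizer.convert_tokens_to_ids(demo_tokenizer._tokenize("ab"))
        assert demo_ids == [2, 3]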
| 150 |
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase( datasets.Metric ):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
} ) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , )
    def _compute(self, predictions, references, return_pvalue=False):
        '''simple docstring'''
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 150 | 1 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )
            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 110 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    mode = "token-classification"

    def __init__(self, hparams):
        """simple docstring"""
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("tasks")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
                f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}")
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
    def forward(self, **inputs):
        """simple docstring"""
        return self.model(**inputs)

    def training_step(self, batch, batch_num):
        """simple docstring"""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        loss = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}
    def prepare_data(self):
        """simple docstring"""
        args = self.hparams
        for mode in ["train", "dev", "test"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
                features = torch.load(cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = self.token_classification_task.read_examples_from_file(args.data_dir, mode)
                features = self.token_classification_task.convert_examples_to_features(
                    examples, self.labels, args.max_seq_length, self.tokenizer, cls_token_at_end=bool(self.config.model_type in ["xlnet"]), cls_token=self.tokenizer.cls_token, cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0, sep_token=self.tokenizer.sep_token, sep_token_extra=False, pad_on_left=bool(self.config.model_type in ["xlnet"]), pad_token=self.tokenizer.pad_token_id, pad_token_segment_id=self.tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: int, batch_size: int, shuffle: bool = False) -> DataLoader:
        """simple docstring"""
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        if features[0].token_type_ids is not None:
            all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        else:
            all_token_type_ids = torch.tensor([0 for f in features], dtype=torch.long)
            # HACK(we will not use this anymore soon)
        all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_label_ids), batch_size=batch_size)
    def validation_step(self, batch, batch_nb):
        """Compute validation""" ""
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type != "distilbert":
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet"] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs):
        """simple docstring"""
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        preds = np.argmax(preds, axis=2)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        label_map = dict(enumerate(self.labels))
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        for i in range(out_label_ids.shape[0]):
            for j in range(out_label_ids.shape[1]):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        results = {
            "val_loss": val_loss_mean,
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        """simple docstring"""
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs):
        """simple docstring"""
        ret, predictions, targets = self._eval_end(outputs)
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--task_type", default="NER", type=str, help="Task type to fine tune in training (e.g. NER, POS, etc)")
        parser.add_argument(
            "--max_seq_length", default=128, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--labels", default="", type=str, help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.", )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none", )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
        return parser
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = NERTransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    model = NERTransformer(args)
    trainer = generic_train(model, args)
    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
| 110 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets, probabilities=None, seed=None, info=None, split=None, stopping_strategy="first_exhausted", ):
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f'''Dataset at position {i} has at least one split: {list(dataset)}\n'''
                    f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']''')
            raise ValueError(
                f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.''')
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''')
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f'''{stopping_strategy} is not supported. Please enter a valid stopping_strategy.''')
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(
    dsets, info=None, split=None, axis=0, ):
    """simple docstring"""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '''
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f'''Dataset at position {i} has at least one split: {list(dataset)}\n'''
                    f'''Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']''')
            raise ValueError(
                f'''Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.''')
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'''Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.''')
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
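# Hedged usage sketch (illustration only): with default arguments the examples of the
# two datasets are alternated until the first one is exhausted.
if __name__ == "__main__":
    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12]})
    mixed = interleave_datasets([d1, d2])
    assert mixed["a"] == [0, 10, 1, 11, 2, 12]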
| 368 |
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """simple docstring"""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """simple docstring"""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i)
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
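    # Quick sanity check (illustration only): 220 and 284 form the classic amicable
    # pair, so each number's proper-divisor sum equals the other.
    print(sum_of_divisors(220))  # 284
    print(sum_of_divisors(284))  # 220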
| 319 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 6 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
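# Hedged usage sketch (illustration only): feeding samples through a low-pass filter
# one at a time via IIRFilter.process; the cutoff and sample rate are arbitrary.
def _demo_lowpass() -> list:
    filt = make_lowpass(1000, 48000)
    return [filt.process(sample) for sample in [0.0, 1.0, 1.0, 1.0]]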
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2), ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
| 6 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class MgpstrConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 218 |
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """simple docstring"""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
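    # Hedged cross-check (illustration only): circular convolution can also be computed
    # with FFTs, since element-wise multiplication in the frequency domain equals
    # circular convolution in the time domain.
    direct = CircularConvolution().circular_convolution()  # [10, 10, 6, 14]
    via_fft = np.real(np.fft.ifft(np.fft.fft([2, 1, 2, -1]) * np.fft.fft([1, 2, 3, 4])))
    assert np.allclose(direct, via_fft, atol=1e-2)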
| 218 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """simple docstring"""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le", ):
    """simple docstring"""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le", ):
    """simple docstring"""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter ( iterator , chunk_len : int , stride : Tuple[int, int] , stream : bool = False ):
    """simple docstring"""
    acc = b""
    stride_left , stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            F"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}" )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc ) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc ) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc ) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
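# Hedged standalone sketch of the non-streaming path of the accumulator above:
# fixed-size chunks overlap by (stride_left, stride_right) bytes so a consumer
# can discard chunk edges; the first chunk has no left context, hence its
# (0, stride_right) stride. A minimal rewrite for illustration, not the
# function itself.
def _demo_chunks(payloads, chunk_len, stride_left, stride_right):
    acc, _left = b"", 0
    for raw in payloads:
        acc += raw
        while len(acc) >= chunk_len:
            yield acc[:chunk_len], (_left, stride_right)
            _left = stride_left
            acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > stride_left:
        yield acc, (_left, 0)

assert list(_demo_chunks([b"abcdefgh"], 4, 1, 1))[0] == (b"abcd", (0, 1))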
def _ffmpeg_stream ( ffmpeg_command , buflen : int ):
    """simple docstring"""
    bufsize = 2**24 # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command , stdout=subprocess.PIPE , bufsize=bufsize ) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen )
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 340 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a_ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None )-> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None )-> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
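# Hedged sketch of the layouts produced by the two methods above, with
# illustrative ids only (101 = [CLS] and 102 = [SEP] in the classic BERT
# vocabulary; the real ids come from the loaded tokenizer).
_cls, _sep, _a_ids, _b_ids = [101], [102], [7, 8], [9]
assert _cls + _a_ids + _sep + _b_ids + _sep == [101, 7, 8, 102, 9, 102]
# token_type_ids: 0 for the first segment (incl. [CLS]/[SEP]), 1 for the second
assert len(_cls + _a_ids + _sep) * [0] + len(_b_ids + _sep) * [1] == [0, 0, 0, 0, 1, 1]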
| 340 | 1 |
"""simple docstring"""
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def _SCREAMING_SNAKE_CASE ( lat1 : float , lon1 : float , lat2 : float , lon2 : float ) ->float:
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_a = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_b = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_a = radians(lon1 )
    lambda_b = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_b - phi_a) / 2 )
    sin_sq_lambda = sin((lambda_b - lambda_a) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_a ) * cos(phi_b ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
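# Hedged standalone check of the haversine formula above at the equator, where
# the flattening correction vanishes: (0°, 0°) to (0°, 90°) is a quarter of
# the equatorial circumference, i.e. RADIUS * pi / 2, roughly 10_018_754 m.
from math import pi as _pi
assert abs(_SCREAMING_SNAKE_CASE(0.0, 0.0, 0.0, 90.0) - RADIUS * _pi / 2) < 1e-6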
if __name__ == "__main__":
import doctest
doctest.testmod()
| 354 |
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
a : List[str] = logging.get_logger(__name__)
a : Optional[int] = '''T5Config'''
def _SCREAMING_SNAKE_CASE ( input_ids : jnp.array , pad_token_id : int , decoder_start_token_id : int ) ->jnp.ndarray:
    '''simple docstring'''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
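# Hedged concrete check of the shift above: tokens move one slot to the right,
# the decoder start id (0 here, an assumed value) fills position 0, and any
# -100 label would be replaced by the pad id (1 here, also assumed).
_demo_shifted = _SCREAMING_SNAKE_CASE(jnp.array([[5, 6, 7]]), 1, 0)
assert _demo_shifted.tolist() == [[0, 5, 6]]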
class FlaxMTaModel( FlaxTaModel ):
    model_type = """mt5"""
    config_class = MTaConfig
class FlaxMTaEncoderModel( FlaxTaEncoderModel ):
    model_type = """mt5"""
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration( FlaxTaForConditionalGeneration ):
    model_type = """mt5"""
    config_class = MTaConfig
| 79 | 0 |
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset ( dataset_size , input_in_memory_max_size , monkeypatch ):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
assert result == expected | 74 | """simple docstring"""
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def a_ ( ):
'''simple docstring'''
lowercase__ : Optional[int] = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.' )
parser.add_argument('data_file' , metavar='data.json' , help='Input data JSON file.' )
parser.add_argument('pred_file' , metavar='pred.json' , help='Model predictions.' )
parser.add_argument(
'--out-file' , '-o' , metavar='eval.json' , help='Write accuracy metrics to file (default is stdout).' )
parser.add_argument(
'--na-prob-file' , '-n' , metavar='na_prob.json' , help='Model estimates of probability of no answer.' )
parser.add_argument(
'--na-prob-thresh' , '-t' , type=_lowerCAmelCase , default=1.0 , help='Predict "" if no-answer probability exceeds this (default = 1.0).' , )
parser.add_argument(
'--out-image-dir' , '-p' , metavar='out_images' , default=_lowerCAmelCase , help='Save precision-recall curves to directory.' )
parser.add_argument('--verbose' , '-v' , action='store_true' )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def a_ ( _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Tuple = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowercase__ : Optional[int] = bool(qa['answers']['text'] )
return qid_to_has_ans
def a_ ( _lowerCAmelCase : Any ):
'''simple docstring'''
def remove_articles(_lowerCAmelCase : int ):
return ARTICLES_REGEX.sub(' ' , _lowerCAmelCase )
def white_space_fix(_lowerCAmelCase : str ):
return " ".join(text.split() )
def remove_punc(_lowerCAmelCase : List[Any] ):
lowercase__ : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCAmelCase : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )
def a_ ( _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
if not s:
return []
return normalize_answer(_lowerCAmelCase ).split()
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str ):
'''simple docstring'''
return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) )
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict ):
'''simple docstring'''
lowercase__ : Dict = get_tokens(_lowerCAmelCase )
lowercase__ : List[str] = get_tokens(_lowerCAmelCase )
lowercase__ : List[Any] = collections.Counter(_lowerCAmelCase ) & collections.Counter(_lowerCAmelCase )
lowercase__ : int = sum(common.values() )
if len(_lowerCAmelCase ) == 0 or len(_lowerCAmelCase ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
lowercase__ : Any = 1.0 * num_same / len(_lowerCAmelCase )
lowercase__ : Dict = 1.0 * num_same / len(_lowerCAmelCase )
lowercase__ : Any = (2 * precision * recall) / (precision + recall)
return fa
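# Hedged arithmetic check of the token-level F1 above: gold tokens
# {"cat", "sat"} versus a prediction containing only "cat" share one token,
# so precision = 1/1, recall = 1/2 and F1 = 2 * 1.0 * 0.5 / 1.5 = 2/3.
_p, _r = 1 / 1, 1 / 2
assert abs((2 * _p * _r) / (_p + _r) - 2 / 3) < 1e-12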
def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ : Optional[int] = {}
lowercase__ : Union[str, Any] = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
lowercase__ : Any = qa['id']
lowercase__ : Union[str, Any] = [t for t in qa['answers']['text'] if normalize_answer(_lowerCAmelCase )]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
lowercase__ : Dict = ['']
if qid not in preds:
print(f"""Missing prediction for {qid}""" )
continue
lowercase__ : Optional[int] = preds[qid]
# Take max over all gold answers
lowercase__ : int = max(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for a in gold_answers )
lowercase__ : Optional[Any] = max(compute_fa(_lowerCAmelCase , _lowerCAmelCase ) for a in gold_answers )
return exact_scores, fa_scores
def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] ):
'''simple docstring'''
lowercase__ : str = {}
for qid, s in scores.items():
lowercase__ : int = na_probs[qid] > na_prob_thresh
if pred_na:
lowercase__ : Optional[Any] = float(not qid_to_has_ans[qid] )
else:
lowercase__ : Optional[Any] = s
return new_scores
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str=None ):
'''simple docstring'''
if not qid_list:
lowercase__ : Optional[Any] = len(_lowerCAmelCase )
return collections.OrderedDict(
[
('exact', 1_0_0.0 * sum(exact_scores.values() ) / total),
('f1', 1_0_0.0 * sum(fa_scores.values() ) / total),
('total', total),
] )
else:
lowercase__ : Optional[Any] = len(_lowerCAmelCase )
return collections.OrderedDict(
[
('exact', 1_0_0.0 * sum(exact_scores[k] for k in qid_list ) / total),
('f1', 1_0_0.0 * sum(fa_scores[k] for k in qid_list ) / total),
('total', total),
] )
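# Hedged sketch of the aggregation above: per-question scores are averaged and
# reported as percentages, e.g. one exact match out of two questions -> 50.0.
_demo_scores = {"q1": 1.0, "q2": 0.0}
assert 100.0 * sum(_demo_scores.values()) / len(_demo_scores) == 50.0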
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
for k in new_eval:
lowercase__ : int = new_eval[k]
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
'''simple docstring'''
plt.step(_lowerCAmelCase , _lowerCAmelCase , color='b' , alpha=0.2 , where='post' )
plt.fill_between(_lowerCAmelCase , _lowerCAmelCase , step='post' , alpha=0.2 , color='b' )
plt.xlabel('Recall' )
plt.ylabel('Precision' )
plt.xlim([0.0, 1.0_5] )
plt.ylim([0.0, 1.0_5] )
plt.title(_lowerCAmelCase )
plt.savefig(_lowerCAmelCase )
plt.clf()
def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : Any=None , _lowerCAmelCase : List[str]=None ):
'''simple docstring'''
lowercase__ : Optional[int] = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : na_probs[k] )
lowercase__ : Tuple = 0.0
lowercase__ : List[str] = 1.0
lowercase__ : List[str] = 0.0
lowercase__ : Union[str, Any] = [1.0]
lowercase__ : List[Any] = [0.0]
lowercase__ : Optional[int] = 0.0
for i, qid in enumerate(_lowerCAmelCase ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
lowercase__ : Tuple = true_pos / float(i + 1 )
lowercase__ : Union[str, Any] = true_pos / float(_lowerCAmelCase )
if i == len(_lowerCAmelCase ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_lowerCAmelCase )
recalls.append(_lowerCAmelCase )
if out_image:
plot_pr_curve(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return {"ap": 1_0_0.0 * avg_prec}
def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
'''simple docstring'''
if out_image_dir and not os.path.exists(_lowerCAmelCase ):
os.makedirs(_lowerCAmelCase )
lowercase__ : List[str] = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
lowercase__ : Dict = make_precision_recall_eval(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , out_image=os.path.join(_lowerCAmelCase , 'pr_exact.png' ) , title='Precision-Recall curve for Exact Match score' , )
lowercase__ : Tuple = make_precision_recall_eval(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , out_image=os.path.join(_lowerCAmelCase , 'pr_f1.png' ) , title='Precision-Recall curve for F1 score' , )
lowercase__ : List[Any] = {k: float(_lowerCAmelCase ) for k, v in qid_to_has_ans.items()}
lowercase__ : Any = make_precision_recall_eval(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , out_image=os.path.join(_lowerCAmelCase , 'pr_oracle.png' ) , title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)' , )
merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'pr_exact' )
merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'pr_f1' )
merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'pr_oracle' )
def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
'''simple docstring'''
if not qid_list:
return
lowercase__ : List[str] = [na_probs[k] for k in qid_list]
lowercase__ : Tuple = np.ones_like(_lowerCAmelCase ) / float(len(_lowerCAmelCase ) )
plt.hist(_lowerCAmelCase , weights=_lowerCAmelCase , bins=20 , range=(0.0, 1.0) )
plt.xlabel('Model probability of no-answer' )
plt.ylabel('Proportion of dataset' )
plt.title(f"""Histogram of no-answer probability: {name}""" )
plt.savefig(os.path.join(_lowerCAmelCase , f"""na_prob_hist_{name}.png""" ) )
plt.clf()
def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ : Tuple = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
lowercase__ : int = num_no_ans
lowercase__ : Optional[int] = cur_score
lowercase__ : Tuple = 0.0
lowercase__ : Dict = sorted(_lowerCAmelCase , key=lambda _lowerCAmelCase : na_probs[k] )
for i, qid in enumerate(_lowerCAmelCase ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
lowercase__ : Optional[int] = scores[qid]
else:
if preds[qid]:
lowercase__ : List[Any] = -1
else:
lowercase__ : Optional[int] = 0
cur_score += diff
if cur_score > best_score:
lowercase__ : Dict = cur_score
lowercase__ : Optional[int] = na_probs[qid]
return 1_0_0.0 * best_score / len(_lowerCAmelCase ), best_thresh
def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str ):
'''simple docstring'''
lowercase__ , lowercase__ : List[Any] = find_best_thresh(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase__ , lowercase__ : Dict = find_best_thresh(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Any = best_exact
lowercase__ : Tuple = exact_thresh
lowercase__ : Optional[Any] = best_fa
lowercase__ : Any = fa_thresh
def a_ ( ):
'''simple docstring'''
with open(OPTS.data_file ) as f:
lowercase__ : List[Any] = json.load(_lowerCAmelCase )
lowercase__ : Union[str, Any] = dataset_json['data']
with open(OPTS.pred_file ) as f:
lowercase__ : str = json.load(_lowerCAmelCase )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
lowercase__ : Union[str, Any] = json.load(_lowerCAmelCase )
else:
lowercase__ : str = {k: 0.0 for k in preds}
lowercase__ : int = make_qid_to_has_ans(_lowerCAmelCase ) # maps qid to True/False
lowercase__ : List[str] = [k for k, v in qid_to_has_ans.items() if v]
lowercase__ : Any = [k for k, v in qid_to_has_ans.items() if not v]
lowercase__ , lowercase__ : Any = get_raw_scores(_lowerCAmelCase , _lowerCAmelCase )
lowercase__ : Optional[Any] = apply_no_ans_threshold(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , OPTS.na_prob_thresh )
lowercase__ : Union[str, Any] = apply_no_ans_threshold(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , OPTS.na_prob_thresh )
lowercase__ : Tuple = make_eval_dict(_lowerCAmelCase , _lowerCAmelCase )
if has_ans_qids:
lowercase__ : int = make_eval_dict(_lowerCAmelCase , _lowerCAmelCase , qid_list=_lowerCAmelCase )
merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'HasAns' )
if no_ans_qids:
lowercase__ : Optional[Any] = make_eval_dict(_lowerCAmelCase , _lowerCAmelCase , qid_list=_lowerCAmelCase )
merge_eval(_lowerCAmelCase , _lowerCAmelCase , 'NoAns' )
if OPTS.na_prob_file:
find_all_best_thresh(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , OPTS.out_image_dir )
histogram_na_prob(_lowerCAmelCase , _lowerCAmelCase , OPTS.out_image_dir , 'hasAns' )
histogram_na_prob(_lowerCAmelCase , _lowerCAmelCase , OPTS.out_image_dir , 'noAns' )
if OPTS.out_file:
with open(OPTS.out_file , 'w' ) as f:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
else:
print(json.dumps(_lowerCAmelCase , indent=2 ) )
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 77 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
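# Hedged standalone sketch of the lazy pattern wired up above: the real import
# only happens on first attribute access, keeping the top-level import cheap.
# The use of the stdlib "collections" module below is purely illustrative.
def _demo_lazy_getattr(name):
    import importlib
    return getattr(importlib.import_module("collections"), name)
assert _demo_lazy_getattr("OrderedDict").__name__ == "OrderedDict"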
| 221 |
from math import pi, sqrt
def gamma ( num : float ):
    """simple docstring"""
    if num <= 0:
        raise ValueError("math domain error" )
    if num > 171.5:
        raise OverflowError("math range error" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
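# Hedged spot-checks of the recursion above: gamma(n) equals (n - 1)! for
# positive integers, and half-integer arguments bottom out at
# gamma(0.5) == sqrt(pi), so gamma(2.5) == 1.5 * 0.5 * sqrt(pi).
assert gamma(5 ) == 24.0
assert abs(gamma(2.5 ) - 0.75 * sqrt(pi )) < 1e-12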
def test_gamma ( ):
    """simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input('Gamma of: '))
        print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 221 | 1 |
"""simple docstring"""
import numpy as np
import qiskit
def bbaa ( key_len : int = 8 , seed : int | None = None ) -> str:
    rng = np.random.default_rng(seed=seed )
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2 , size=num_qubits )
    # The set of states Alice will prepare.
    alice_state = rng.integers(2 , size=num_qubits )
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2 , size=num_qubits )
    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits , name="""BB84""" )
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state ):
        if alice_state[index] == 1:
            bbaa_circ.x(index )
        if alice_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis ):
        if bob_basis[index] == 1:
            bbaa_circ.h(index )
    bbaa_circ.barrier()
    bbaa_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("""aer_simulator""" )
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ , sim , shots=1 , seed_simulator=seed )
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis , bob_basis , result )
            if alice_basis_bit == bob_basis_bit
        ] )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key ) >= key_len else gen_key.ljust(key_len , """0""" )
    return key
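# Hedged sketch of the sifting step above: only positions where Alice's and
# Bob's measurement bases agree contribute bits to the final key. The basis
# lists and bit string below are illustrative values, not simulator output.
_alice, _bob, _bits = [0, 1, 1, 0], [0, 0, 1, 1], "1011"
assert "".join(b for x, y, b in zip(_alice, _bob, _bits) if x == y) == "11"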
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 44 | """simple docstring"""
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 44 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method ( coefficient_matrix , constant_matrix , init_val , iterations , ):
    rows1 , cols1 = coefficient_matrix.shape
    rows2 , cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            f"matrix but received {len(init_val )} and {rows1}"
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""" )
    table = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows , cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterates the whole matrix for given number of times
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant ( table ):
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""" )
    return is_diagonally_dominant
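# Hedged usage sketch of the solver above on a strictly diagonally dominant
# 2x2 system 4x + y = 2, x + 3y = -1, whose exact solution is (7/11, -6/11);
# 50 Jacobi sweeps converge far below the tolerance used here.
_demo_sol = jacobi_iteration_method(
    np.array([[4.0, 1.0], [1.0, 3.0]] ) , np.array([[2.0], [-1.0]] ) , [0.0, 0.0] , 50 )
assert abs(_demo_sol[0] - 7 / 11 ) < 1e-6 and abs(_demo_sol[1] + 6 / 11 ) < 1e-6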
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def snake_case_ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
__SCREAMING_SNAKE_CASE = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """<pad>"""
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__) , lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__) , lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , """<s>""")
self.assertEqual(vocab_keys[1] , """<pad>""")
self.assertEqual(len(lowerCAmelCase__) , 1_0_0_8)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = XGLMTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""")
self.assertListEqual(lowerCAmelCase__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCAmelCase__)
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def snake_case_ ( self):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""")
def snake_case_ ( self):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase__ , f.name)
__SCREAMING_SNAKE_CASE = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = pickle.dumps(lowerCAmelCase__)
pickle.loads(lowerCAmelCase__)
def snake_case_ ( self):
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = """I was born in 92000, and this is falsé."""
__SCREAMING_SNAKE_CASE = tokenizer.tokenize(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rust_tokenizer.tokenize(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = tokenizer.encode(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = rust_tokenizer.encode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = """Hello World!"""
__SCREAMING_SNAKE_CASE = [2, 3_1_2_2_7, 4_4_4_7, 3_5]
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
__SCREAMING_SNAKE_CASE = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5]
# fmt: on
self.assertListEqual(lowerCAmelCase__ , self.big_tokenizer.encode(lowerCAmelCase__))
@slow
def snake_case_ ( self):
# fmt: off
__SCREAMING_SNAKE_CASE = {
"""input_ids""": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""facebook/xglm-564M""" , padding=lowerCAmelCase__ , )
| 255 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : List[str] = {
'''microsoft/table-transformer-detection''': (
'''https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'''
),
}
class TableTransformerConfig( PretrainedConfig ):
    model_type = """table-transformer"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
def __init__( self , __UpperCamelCase=True , __UpperCamelCase=None , __UpperCamelCase=3 , __UpperCamelCase=100 , __UpperCamelCase=6 , __UpperCamelCase=2048 , __UpperCamelCase=8 , __UpperCamelCase=6 , __UpperCamelCase=2048 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=True , __UpperCamelCase="relu" , __UpperCamelCase=256 , __UpperCamelCase=0.1 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1.0 , __UpperCamelCase=False , __UpperCamelCase="sine" , __UpperCamelCase="resnet50" , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , ) -> str:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
snake_case__ : List[str] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case__ : Any = backbone_config.get('model_type' )
snake_case__ : List[str] = CONFIG_MAPPING[backbone_model_type]
snake_case__ : Optional[int] = config_class.from_dict(__UpperCamelCase )
# set timm attributes to None
snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = None, None, None
snake_case__ : List[str] = use_timm_backbone
snake_case__ : Dict = backbone_config
snake_case__ : Dict = num_channels
snake_case__ : Dict = num_queries
snake_case__ : Dict = d_model
snake_case__ : Any = encoder_ffn_dim
snake_case__ : int = encoder_layers
snake_case__ : Union[str, Any] = encoder_attention_heads
snake_case__ : List[Any] = decoder_ffn_dim
snake_case__ : List[str] = decoder_layers
snake_case__ : str = decoder_attention_heads
snake_case__ : str = dropout
snake_case__ : List[Any] = attention_dropout
snake_case__ : List[str] = activation_dropout
snake_case__ : str = activation_function
snake_case__ : Optional[int] = init_std
snake_case__ : List[Any] = init_xavier_std
snake_case__ : List[str] = encoder_layerdrop
snake_case__ : List[Any] = decoder_layerdrop
snake_case__ : Union[str, Any] = encoder_layers
snake_case__ : Optional[Any] = auxiliary_loss
snake_case__ : str = position_embedding_type
snake_case__ : List[Any] = backbone
snake_case__ : Union[str, Any] = use_pretrained_backbone
snake_case__ : Any = dilation
# Hungarian matcher
snake_case__ : Any = class_cost
snake_case__ : Optional[Any] = bbox_cost
snake_case__ : Optional[Any] = giou_cost
# Loss coefficients
snake_case__ : Dict = mask_loss_coefficient
snake_case__ : Any = dice_loss_coefficient
snake_case__ : Any = bbox_loss_coefficient
snake_case__ : Optional[int] = giou_loss_coefficient
snake_case__ : List[str] = eos_coefficient
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
@property
def __a ( self ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def __a ( self ) -> int:
'''simple docstring'''
return self.d_model
class TableTransformerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def __a ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __a ( self ) -> float:
'''simple docstring'''
return 1E-5
@property
def __a ( self ) -> int:
'''simple docstring'''
return 12
| 143 | import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowerCAmelCase__ : Dict = logging.get_logger(__name__)
enable_full_determinism()
class UNetaDModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNetaDModel
    main_input_name = """sample"""
@property
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Optional[Any] = 4
snake_case__ : List[Any] = 3
snake_case__ : int = (32, 32)
snake_case__ : List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : str = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
def __a ( self ) -> Any:
'''simple docstring'''
snake_case__ : Union[str, Any] = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
snake_case__ : List[Any] = self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNetaDModel
    main_input_name = """sample"""
@property
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = 4
snake_case__ : List[Any] = 4
snake_case__ : List[str] = (32, 32)
snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : int = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __a ( self ) -> int:
'''simple docstring'''
return (4, 32, 32)
@property
def __a ( self ) -> str:
'''simple docstring'''
return (4, 32, 32)
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Union[str, Any] = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
snake_case__ : List[Any] = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ , snake_case__ : Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCamelCase )
snake_case__ : List[Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
model.to(__UpperCamelCase )
snake_case__ : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase )
model_accelerate.to(__UpperCamelCase )
model_accelerate.eval()
snake_case__ : Tuple = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ : Union[str, Any] = noise.to(__UpperCamelCase )
snake_case__ : List[str] = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
snake_case__ : str = model_accelerate(__UpperCamelCase , __UpperCamelCase )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case__ , snake_case__ : Union[str, Any] = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=__UpperCamelCase , low_cpu_mem_usage=__UpperCamelCase )
model_normal_load.to(__UpperCamelCase )
model_normal_load.eval()
snake_case__ : List[str] = model_normal_load(__UpperCamelCase , __UpperCamelCase )['sample']
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 )
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : List[Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(__UpperCamelCase )
snake_case__ : Any = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
snake_case__ : List[Any] = noise.to(__UpperCamelCase )
snake_case__ : List[str] = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
with torch.no_grad():
snake_case__ : List[str] = model(__UpperCamelCase , __UpperCamelCase ).sample
snake_case__ : Tuple = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case__ : int = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 ) )
class NCSNppModelTests( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = UNetaDModel
    main_input_name = """sample"""
@property
def __a ( self , __UpperCamelCase=(32, 32) ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Dict = 4
snake_case__ : Dict = 3
snake_case__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : List[str] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def __a ( self ) -> Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def __a ( self ) -> int:
'''simple docstring'''
return (3, 32, 32)
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Optional[Any] = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
snake_case__ : str = self.dummy_input
return init_dict, inputs_dict
@slow
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case__ , snake_case__ : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(__UpperCamelCase )
snake_case__ : Dict = self.dummy_input
snake_case__ : Union[str, Any] = floats_tensor((4, 3) + (256, 256) ).to(__UpperCamelCase )
snake_case__ : List[Any] = noise
snake_case__ : Any = model(**__UpperCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : str = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(__UpperCamelCase )
snake_case__ : Optional[Any] = 4
snake_case__ : str = 3
snake_case__ : List[Any] = (256, 256)
snake_case__ : Dict = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : int = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase )
with torch.no_grad():
snake_case__ : str = model(__UpperCamelCase , __UpperCamelCase ).sample
snake_case__ : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) )
def __a ( self ) -> List[Any]:
'''simple docstring'''
snake_case__ : Dict = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(__UpperCamelCase )
snake_case__ : Dict = 4
snake_case__ : List[str] = 3
snake_case__ : Union[str, Any] = (32, 32)
snake_case__ : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
snake_case__ : int = torch.tensor(batch_size * [1E-4] ).to(__UpperCamelCase )
with torch.no_grad():
snake_case__ : Tuple = model(__UpperCamelCase , __UpperCamelCase ).sample
snake_case__ : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case__ : Optional[int] = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1E-2 ) )
def __a ( self ) -> Tuple:
'''simple docstring'''
pass
| 143 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
snake_case_ : Optional[Any] = logging.get_logger(__name__)
class lowercase__ ( _SCREAMING_SNAKE_CASE ):
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        warnings.warn(
            'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use CLIPImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 366 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 236 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ :List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowercase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def A__ ( self):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase = XLNetTokenizer(A__ ,keep_accents=A__)
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname)
def A__ ( self):
lowercase = '''<s>'''
lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__) ,A__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__) ,A__)
def A__ ( self):
lowercase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] ,'''<unk>''')
self.assertEqual(vocab_keys[1] ,'''<s>''')
self.assertEqual(vocab_keys[-1] ,'''<eod>''')
self.assertEqual(len(A__) ,1_0_0_6)
def A__ ( self):
self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_0)
    def test_full_tokenizer( self ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,keep_accents=True )
        tokens = tokenizer.tokenize('''This is a test''' )
        self.assertListEqual(tokens ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] ,)
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids ,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] ,)
    def test_tokenizer_lower( self ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,do_lower_case=True )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''▁he''', '''ll''', '''o'''])
    def test_tokenizer_no_lower( self ):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB ,do_lower_case=False )
        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
        self.assertListEqual(
            tokens ,[
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] ,)
@slow
    def test_sequence_builders( self ):
        tokenizer = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
        text = tokenizer.encode('''sequence builders''' ,add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
    def test_tokenizer_integration( self ):
# fmt: off
lowercase = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase ,model_name='''xlnet-base-cased''' ,revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' ,)
| 101 | 0 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def polar_force ( magnitude : float , angle : float , radian_mode : bool = False ) -> list[float]:
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium ( forces : NDArray[float64] , location : NDArray[float64] , eps : float = 10**-1 ) -> bool:
    moments : NDArray[float64] = cross(location , forces )
    sum_moments : float = sum(moments )
    return abs(sum_moments ) < eps
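# Worked example (illustrative, not part of the original test block below): two equal
# and opposite forces applied at the same point cancel each other's moments, since
# cross([1, 0], [1, 1]) = 1 and cross([1, 0], [-1, -1]) = -1 sum to zero:
#   in_static_equilibrium(array([[1, 1], [-1, -1]]), array([[1, 0], [1, 0]]))  # -> True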
if __name__ == "__main__":
# Test to check if it works
    forces = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
    forces = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
    location = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
    doctest.testmod()
| 190 |
'''simple docstring'''
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest (TestCase ):
    '''simple docstring'''
    def setUp( self ):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = 'pt'
        self.framework_tf = 'tf'
    def _setup_pt_ckpt( self , model_path ):
        model_pt = AutoModel.from_pretrained(self.test_model )
        model_pt.save_pretrained(model_path )
    def _setup_tf_ckpt( self , model_path ):
        model_tf = TFAutoModel.from_pretrained(self.test_model , from_pt=True )
        model_tf.save_pretrained(model_path )
    def test_framework_provided( self ):
        mock_framework = 'mock_framework'
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model , mock_framework )
        self.assertEqual(framework , mock_framework )
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt , mock_framework )
            self.assertEqual(framework , mock_framework )
    def test_checkpoint_lookup( self ):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt )
            framework = FeaturesManager.determine_framework(local_pt_ckpt )
            self.assertEqual(framework , self.framework_pt )
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt )
            framework = FeaturesManager.determine_framework(local_tf_ckpt )
            self.assertEqual(framework , self.framework_tf )
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError ):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt )
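    # Taken together, the tests above and below pin down determine_framework's
    # priority: an explicitly provided framework wins, then the framework inferred
    # from a local checkpoint's weight files, then whichever backend is importable
    # in the environment (with PyTorch preferred when both are available).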
    def test_from_environment( self ):
        # Framework not provided - TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_tf )
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True )
        mock_torch_available = MagicMock(return_value=True )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            framework = FeaturesManager.determine_framework(self.test_model )
            self.assertEqual(framework , self.framework_pt )
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False )
        mock_torch_available = MagicMock(return_value=False )
        with patch('transformers.onnx.features.is_tf_available' , mock_tf_available ), patch(
            'transformers.onnx.features.is_torch_available' , mock_torch_available ):
            with self.assertRaises(EnvironmentError ):
                framework = FeaturesManager.determine_framework(self.test_model )
| 190 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3 )
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len ,data_file ,number=size_objective_set ,min_len=1026 ,trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    # load pretrained model
    model = load_gpt2("gpt2" ).to(device )
    print("computing perplexity on objective set" )
    orig_perp = compute_perplexity(model ,objective_set ,context_len ).item()
    print("perplexity on objective set:" ,orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model ,orig_perp ,context_len ,train_data ,objective_set ,max_steps ,device ,igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42 )
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2" )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner ,secondary_learner_train_data ,max_epochs=secondary_learner_max_epochs ,batch_size=secondary_learner_batch_size ,eval_freq=100 ,igf_model_path=igf_model_path ,)
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
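# How the pieces fit together (summary of the code below, not new behaviour): the
# secondary learner maps a tokenized context X to a predicted information gain
# IG(X); during finetune(), contexts whose predicted IG falls below the running
# threshold are filtered out of the language-model batch.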
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset ,sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) ,dtype=torch.long ,device=device )
    model, lm_optimizer, lm_scheduler = recopy_model(model ,device ,max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model ,test_dataset ,context_len )
    test_perps.append(real_perp )
    print("Test perplexity, step" ,global_step ,":" ,real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 ,example.size(2 ) - context_len - 1 )
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context ,labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context ,dtype=torch.long ,device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() ,3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model ,test_dataset ,context_len )
                    test_perps.append(real_perp )
                    print("Test perplexity, step" ,global_step ,":" ,real_perp )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
    torch.save(model.state_dict() ,finetuned_model_name )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
    # Required parameters
    parser.add_argument(
        "--data_dir" ,default=None ,type=str ,required=True ,help="The input data dir. Should contain data files for WikiText." ,)
    parser.add_argument(
        "--model_name_or_path" ,default=None ,type=str ,required=True ,help="Path to pretrained model or model identifier from huggingface.co/models" ,)
    parser.add_argument(
        "--data_file" ,type=str ,default=None ,help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ) ,)
    parser.add_argument(
        "--igf_data_file" ,type=str ,default=None ,help="A jbl file containing the context and information gain pairs to train secondary learner." ,)
    parser.add_argument(
        "--output_dir" ,default=None ,type=str ,required=True ,help="The output directory where the final fine-tuned model is stored." ,)
    parser.add_argument(
        "--tokenizer_name" ,default=None ,type=str ,help="Pretrained tokenizer name or path if not the same as model_name" ,)
    parser.add_argument("--seed" ,type=int ,default=None ,help="A seed for reproducible training." )
    parser.add_argument(
        "--context_len" ,default=32 ,type=int ,help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ) ,)
    parser.add_argument(
        "--size_objective_set" ,default=100 ,type=int ,help="number of articles that are long enough to be used as our objective set" ,)
    parser.add_argument(
        "--eval_freq" ,default=100 ,type=int ,help="secondary model evaluation is triggered at eval_freq" )
    parser.add_argument("--max_steps" ,default=1000 ,type=int ,help="To calculate training epochs" )
    parser.add_argument(
        "--secondary_learner_batch_size" ,default=128 ,type=int ,help="batch size of training data for secondary learner" ,)
    parser.add_argument(
        "--batch_size" ,default=16 ,type=int ,help="batch size of training data of language model(gpt2) " )
    parser.add_argument(
        "--eval_interval" ,default=10 ,type=int ,help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ) ,)
    parser.add_argument(
        "--number" ,default=100 ,type=int ,help="The number of examples split to be used as objective_set/test_data" )
    parser.add_argument(
        "--min_len" ,default=1026 ,type=int ,help="The minimum length of the article to be used as objective set" )
    parser.add_argument(
        "--secondary_learner_max_epochs" ,default=15 ,type=int ,help="number of epochs to train secondary learner" )
    parser.add_argument("--trim" ,default=True ,type=bool ,help="truncate the example if it exceeds context length" )
    parser.add_argument(
        "--threshold" ,default=1.0 ,type=float ,help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ) ,)
    parser.add_argument("--finetuned_model_name" ,default="gpt2_finetuned.pt" ,type=str ,help="finetuned_model_name" )
    parser.add_argument(
        "--recopy_model" ,default=recopy_gpt2 ,type=str ,help="Reset the model to the original pretrained GPT-2 weights after each iteration" ,)
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 ,max_steps=10 ,size_objective_set=100 ,min_len=1026 ,trim=True ,data_file="data/tokenized_stories_train_wikitext103.jbl" ,igf_data_file="igf_context_pairs.jbl" ,)
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl" )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data ,secondary_learner_max_epochs=15 ,secondary_learner_batch_size=128 ,eval_freq=100 ,igf_model_path="igf_model.pt" ,)
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2" )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32 ,file="data/tokenized_stories_train_wikitext103.jbl" ,number=100 ,min_len=1026 ,trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model ,train_dataset ,test_dataset ,context_len=32 ,max_steps=1000 ,batch_size=16 ,threshold=1.0 ,recopy_model=recopy_gpt2 ,secondary_learner=secondary_learner ,eval_interval=10 ,finetuned_model_name="gpt2_finetuned.pt" ,)
if __name__ == "__main__":
main()
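# Example invocation (illustrative; the script name and flag values are assumptions,
# and only --data_dir, --model_name_or_path and --output_dir are required):
#   python run_clm_igf.py --data_dir ./data --model_name_or_path gpt2 --output_dir ./out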
| 48 |
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 129 | 0 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Mathematical function sigmoid takes a vector x of K real numbers as input and
    returns 1 / (1 + exp(-x)).
    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """
    Implements the SiLU (a.k.a. swish) activation function, x * sigmoid(x).
    >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
    array([-0.26894142,  0.73105858,  1.76159416])
    """
    return vector * sigmoid(vector )
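# Quick sanity check (illustrative): SiLU approaches the identity for large positive
# inputs and 0 for large negative inputs, e.g.
#   sigmoid_linear_unit(np.array([-10.0, 0.0, 10.0]))  # ~ [-4.54e-04, 0.0, 9.99954602e+00]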
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__A = logging.get_logger(__name__)
__A = '''▁'''
__A = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__A = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
__A = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
__A = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens : List[int] = []
    suffix_tokens : List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs : Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get('''additional_special_tokens''' , [] )
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang , tgt_lang=tgt_lang , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , unk_token=unk_token , pad_token=pad_token , language_codes=language_codes , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=num_madeup_words , **kwargs , )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )
        self.encoder_size = len(self.encoder )
        self.lang_token_to_id = {
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else '''en'''
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang )
        self.set_src_lang_special_tokens(self._src_lang )
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder ) + len(self.lang_token_to_id )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang : str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def _tokenize( self , text : str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token , self.encoder[self.unk_token] )
    def _convert_id_to_token( self , index : int ) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1] * len(self.suffix_tokens )
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def get_vocab( self ) -> Dict:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state['''sp_model'''] = None
        return state
    def __setstate__( self , d : Dict ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        if not save_dir.is_dir():
            raise OSError(f"""{save_directory} should be a directory""" )
        vocab_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
        )
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
    def prepare_seq2seq_batch( self , src_texts : List[str] , src_lang : str = "en" , tgt_texts : Optional[List[str]] = None , tgt_lang : str = "ro" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _build_translation_inputs( self , raw_inputs , src_lang : Optional[str] , tgt_lang : Optional[str] , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , **extra_kwargs )
        tgt_lang_id = self.get_lang_id(tgt_lang )
        inputs['''forced_bos_token_id'''] = tgt_lang_id
        return inputs
    def _switch_to_input_mode( self ):
        self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang : str ) -> None:
        lang_token = self.get_lang_token(src_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens( self , tgt_lang : str ) -> None:
        lang_token = self.get_lang_token(tgt_lang )
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]
    def get_lang_token( self , lang : str ) -> str:
        return self.lang_code_to_token[lang]
    def get_lang_id( self , lang : str ) -> int:
        lang_token = self.get_lang_token(lang )
        return self.lang_token_to_id[lang_token]
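# Usage sketch (illustrative; the checkpoint name is taken from the pretrained map
# above, and the text_target routing assumes the standard tokenizer __call__ API):
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   model_inputs = tokenizer("Hello world", return_tensors="pt")                 # prefixed with __en__
#   labels = tokenizer(text_target="Bonjour le monde", return_tensors="pt")      # prefixed with __fr__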
def load_spm(path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json(path : str ) -> Union[Dict, List]:
    with open(path , '''r''' ) as f:
        return json.load(f )
def save_json(data , path : str ) -> None:
    with open(path , '''w''' ) as f:
        json.dump(data , f , indent=2 )
| 278 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification ( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    input_schema : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    label_schema : ClassVar[Features] = Features({'''labels''': ClassLabel} )
    text_column : str = "text"
    label_column : str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['''labels'''] = features[self.label_column]
        task_template.__dict__['''label_schema'''] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
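# Usage sketch (illustrative): align_with_features swaps the generic ClassLabel in
# label_schema for the dataset's concrete one, leaving the frozen template intact:
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)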
| 292 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester :
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=40 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFPegasusModel(config=config ).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
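# Note on the defaults above: attention_mask masks only pad tokens; the
# decoder_attention_mask always keeps the first (decoder start) position; and the
# three head masks default to all-ones, i.e. "keep every attention head".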
@require_tf
class TFPegasusModelTest ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFPegasusModelTester(self )
        self.config_tester = ConfigTester(self , config_class=PegasusConfig )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests ( unittest.TestCase ):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = '''google/pegasus-xsum'''
    @cached_property
    def tokenizer( self ):
        return AutoTokenizer.from_pretrained(self.model_name )
    @cached_property
    def model( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
        return model
    def _assert_generated_batch_equal_expected( self , **tokenizer_kwargs ):
        generated_words = self.translate_src_text(**tokenizer_kwargs )
        assert self.expected_text == generated_words
    def translate_src_text( self , **tokenizer_kwargs ):
        model_inputs = self.tokenizer(self.src_text , **tokenizer_kwargs , padding=True , return_tensors="tf" )
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True )
        return generated_words
    @slow
    def test_batch_generation( self ):
        self._assert_generated_batch_equal_expected()
| 292 | 1 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor ( ProcessorMixin ):
    '''simple docstring'''
    feature_extractor_class = 'SpeechT5FeatureExtractor'
    tokenizer_class = 'SpeechT5Tokenizer'
    def __init__( self , feature_extractor , tokenizer ):
        super().__init__(feature_extractor , tokenizer )
    def __call__( self , *args , **kwargs ):
        audio = kwargs.pop('''audio''' , None )
        text = kwargs.pop('''text''' , None )
        text_target = kwargs.pop('''text_target''' , None )
        audio_target = kwargs.pop('''audio_target''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        if audio is not None and text is not None:
            raise ValueError(
                '''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                '''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                '''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        elif text is not None:
            inputs = self.tokenizer(text , **kwargs )
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target , *args , sampling_rate=sampling_rate , **kwargs )
            labels = targets['''input_values''']
        elif text_target is not None:
            targets = self.tokenizer(text_target , **kwargs )
            labels = targets['''input_ids''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def pad( self , *args , **kwargs ):
        input_values = kwargs.pop('''input_values''' , None )
        input_ids = kwargs.pop('''input_ids''' , None )
        labels = kwargs.pop('''labels''' , None )
        if input_values is not None and input_ids is not None:
            raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                '''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values , *args , **kwargs )
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids , **kwargs )
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels , list ) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels , **kwargs )
                labels = targets['''input_ids''']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels , *args , **kwargs )
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['''input_values''']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['''labels'''] = labels
            decoder_attention_mask = targets.get('''attention_mask''' )
            if decoder_attention_mask is not None:
                inputs['''decoder_attention_mask'''] = decoder_attention_mask
        return inputs
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
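# Usage sketch (illustrative): for TTS-style training the text is the model input
# and the audio is the target, so labels/decoder_attention_mask come from the
# target side of the batch:
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   batch = processor(text="hello world", audio_target=waveform, sampling_rate=16000)
#   # batch now holds input_ids plus labels (mel targets) and decoder_attention_mask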
| 272 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , block_sizes=[1, 1, 2] , num_decoder_layers=1 , d_model=32 , n_head=4 , d_head=8 , d_inner=37 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , max_position_embeddings=512 , type_vocab_size=3 , initializer_std=0.02 , num_labels=3 , num_choices=4 , scope=None , base=False , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , ):
        model = TFFunnelModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
        config.truncate_seq = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
        config.separate_cls = False
        model = TFFunnelModel(config=config )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
# The mixin base classes below are assumed to be imported earlier in this file
# (they are the usual transformers test mixins for TF models and pipelines).
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
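# A hypothetical local invocation for just these Funnel tests (the path and
# flags are illustrative, not taken from this file):
#
#   python -m pytest tests/models/funnel/test_modeling_tf_funnel.py -v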
| 272 | 1 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset: datasets.Dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset: datasets.Dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
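# `get_duration` is imported from a local `utils` module that is not shown in
# this file. A minimal sketch of what such a timing decorator might look like
# (an assumption, not the actual implementation):
#
#   import functools, time
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           start = time.perf_counter()
#           func(*args, **kwargs)
#           return time.perf_counter() - start  # seconds elapsed
#       return wrapper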
| 122 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = '''\
'''

_DESCRIPTION = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
_KWARGS_DESCRIPTION = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # perplexity = exp of the attention-masked mean negative log-likelihood
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 122 | 1 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    r"""Constructs a SpeechT5 feature extractor."""

    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16_000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7_600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(
        self,
        one_waveform: np.ndarray,
    ) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(speech, dtype=np.float32) for speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
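# Usage sketch (values are hypothetical; assumes a 16 kHz mono waveform):
#
#   import numpy as np
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
#   inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
#   targets = extractor(audio_target=waveform, sampling_rate=16_000)  # log-mel features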
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
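# Usage sketch: with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the ratio
# of input samples to output frames is 5 * 2**6 = 320:
#
#   config = Data2VecAudioConfig()
#   assert config.inputs_to_logits_ratio == 320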
| 98 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 280 | 0 |
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
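# Programmatic usage sketch (bypassing the interactive `main`); the minterm
# values below are illustrative only:
#
#   binary = decimal_to_binary(3, [0.0, 1.0, 2.0, 5.0, 6.0, 7.0])
#   prime_implicants = check(binary)
#   chart = prime_implicant_chart(prime_implicants, binary)
#   essential = selection(chart, prime_implicants)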
| 89 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}


class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
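# Usage sketch (values are the defaults defined above):
#
#   config = EfficientFormerConfig()
#   assert config.model_type == "efficientformer"
#   assert config.hidden_sizes == [48, 96, 224, 448]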
| 89 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
_A = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"
    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
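# Usage sketch (model paths are hypothetical placeholders):
#
#   controlnet_a = ControlNetModel.from_pretrained("path/to/controlnet_canny")
#   controlnet_b = ControlNetModel.from_pretrained("path/to/controlnet_pose")
#   multi = MultiControlNetModel([controlnet_a, controlnet_b])
#   multi.save_pretrained("multi_controlnet")  # writes multi_controlnet, multi_controlnet_1, ...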
| 62 |
"""simple docstring"""
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
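# Pancake sort does O(n) work to find and flip the maximum on each of the n
# passes, so the overall running time is O(n^2); for example,
# pancake_sort([3, 1, 2]) returns [1, 2, 3].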
| 247 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].conv.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].conv.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
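# Example invocation (all paths below are hypothetical placeholders):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model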
| 73 |
"""simple docstring"""
def pancake_sort(arr):
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(pancake_sort(unsorted))
| 73 | 1 |
from manim import *
class Stage5(Scene):
    def construct(self):
        # NOTE: the class name, variable names and the direction/color constants
        # (UP, DOWN, LEFT, RIGHT, RED, BLUE, ORANGE) are reconstructed from
        # context; the original identifiers were not preserved in this copy.
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []

        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)

            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)

        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}

        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)

            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7

                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]),
                        MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7),
                        MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )

                self.play(MoveToTarget(model_cpu_arr[i]))

            a = a_c
            a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(step_2),
            FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])

        self.play(Write(step_3, run_time=3), MoveToTarget(input))

        self.wait()
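# Rendering sketch (the file name is an assumption; requires the `manim` CLI):
#
#   manim -pql this_file.py Stage5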
| 334 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_lowerCamelCase ="\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n"
_lowerCamelCase ="\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n"
_lowerCamelCase ="\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='https://github.com/krishnap25/mauve' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/krishnap25/mauve'] ,reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] ,)
def _lowerCAmelCase ( self : Union[str, Any] ,snake_case : Optional[int] ,snake_case : str ,snake_case : List[str]=None ,snake_case : str=None ,snake_case : int=None ,snake_case : Union[str, Any]=None ,snake_case : Optional[int]="auto" ,snake_case : List[str]=-1 ,snake_case : Union[str, Any]=0.9 ,snake_case : Tuple=5 ,snake_case : Union[str, Any]=500 ,snake_case : Union[str, Any]="gpt2-large" ,snake_case : Union[str, Any]=-1 ,snake_case : Optional[Any]=1024 ,snake_case : Optional[Any]=25 ,snake_case : List[str]=5 ,snake_case : List[str]=True ,snake_case : Optional[Any]=25 ,):
SCREAMING_SNAKE_CASE =compute_mauve(
p_text=snake_case ,q_text=snake_case ,p_features=snake_case ,q_features=snake_case ,p_tokens=snake_case ,q_tokens=snake_case ,num_buckets=snake_case ,pca_max_data=snake_case ,kmeans_explained_var=snake_case ,kmeans_num_redo=snake_case ,kmeans_max_iter=snake_case ,featurize_model_name=snake_case ,device_id=snake_case ,max_text_length=snake_case ,divergence_curve_discretization_size=snake_case ,mauve_scaling_factor=snake_case ,verbose=snake_case ,seed=snake_case ,)
return out
| 334 | 1 |
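# The wrapper above forwards every argument to `mauve.compute_mauve`; calling
# the library directly looks like this. A sketch only: parameter names follow
# the docstring above, and `featurize_model_name="gpt2"` (smaller than the
# 'gpt2-large' default) assumes the GPT-2 weights can be downloaded.
from mauve import compute_mauve

p_text = ["the quick brown fox", "jumps over the lazy dog"]
q_text = ["the quick brown fox", "sleeps under the old tree"]
out = compute_mauve(
    p_text=p_text,
    q_text=q_text,
    featurize_model_name="gpt2",
    device_id=-1,  # CPU featurization, per the argument docs above
    verbose=False,
)
print(out.mauve)  # in (0, 1]; larger means P and Q are closer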
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class __lowerCamelCase ( __snake_case ):
def __init__( self , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=None , **lowerCamelCase ) -> Union[str, Any]:
snake_case_ = parent
snake_case_ = config_class
snake_case_ = has_text_modality
snake_case_ = kwargs
snake_case_ = common_properties
def lowerCAmelCase_ ( self ) -> List[Any]:
snake_case_ = self.config_class(**self.inputs_dict )
snake_case_ = (
["""hidden_size""", """num_attention_heads""", """num_hidden_layers"""]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["""vocab_size"""] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(lowerCamelCase , lowerCamelCase ) , msg=f'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(lowerCamelCase ):
try:
setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
self.parent.assertEqual(
getattr(lowerCamelCase , lowerCamelCase ) , lowerCamelCase , msg=f'''`{name} value {idx} expected, but was {getattr(lowerCamelCase , lowerCamelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(lowerCamelCase ):
try:
snake_case_ = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(lowerCamelCase , lowerCamelCase ) , lowerCamelCase , msg=f'''`{name} value {idx} expected, but was {getattr(lowerCamelCase , lowerCamelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def lowerCAmelCase_ ( self ) -> Optional[int]:
snake_case_ = self.config_class(**self.inputs_dict )
snake_case_ = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case_ = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(lowerCamelCase , """config.json""" )
config_first.to_json_file(lowerCamelCase )
snake_case_ = self.config_class.from_json_file(lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case_ = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(lowerCamelCase )
snake_case_ = self.config_class.from_pretrained(lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowerCAmelCase_ ( self ) -> Any:
snake_case_ = self.config_class(**self.inputs_dict )
snake_case_ = """test"""
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ = os.path.join(lowerCamelCase , lowerCamelCase )
config_first.save_pretrained(lowerCamelCase )
snake_case_ = self.config_class.from_pretrained(lowerCamelCase , subfolder=lowerCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def lowerCAmelCase_ ( self ) -> Any:
snake_case_ = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
snake_case_ = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def lowerCAmelCase_ ( self ) -> List[str]:
if self.config_class.is_composition:
return
snake_case_ = self.config_class()
self.parent.assertIsNotNone(lowerCamelCase )
def lowerCAmelCase_ ( self ) -> Dict:
snake_case_ = copy.deepcopy(lowerCamelCase )
snake_case_ = self.config_class(**lowerCamelCase )
snake_case_ = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("""torch_dtype""", config.torch_dtype, torch.floataa) )
elif getattr(lowerCamelCase , lowerCamelCase ) != value:
wrong_values.append((key, getattr(lowerCamelCase , lowerCamelCase ), value) )
if len(lowerCamelCase ) > 0:
snake_case_ = """\n""".join([f'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(f'''The following keys were not properly set in the config:\n{errors}''' )
def lowerCAmelCase_ ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 34 |
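# In transformers this helper is named ConfigTester (the class above is its
# obfuscated form), and model test suites wire it up as below. A sketch:
# BertConfig stands in for whatever config class is under test, and
# ConfigTester is assumed importable from the test module above.
import unittest

from transformers import BertConfig

class BertConfigTest(unittest.TestCase):
    def setUp(self):
        # extra kwargs (hidden_size=37, ...) become self.inputs_dict
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        # runs every check defined above: common properties, JSON round-trip,
        # save/load (including subfolder), num_labels, and kwarg init
        self.config_tester.run_common_tests()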
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''b0''': efficientnet.EfficientNetBa,
'''b1''': efficientnet.EfficientNetBa,
'''b2''': efficientnet.EfficientNetBa,
'''b3''': efficientnet.EfficientNetBa,
'''b4''': efficientnet.EfficientNetBa,
'''b5''': efficientnet.EfficientNetBa,
'''b6''': efficientnet.EfficientNetBa,
'''b7''': efficientnet.EfficientNetBa,
}
lowerCamelCase_ = {
'''b0''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.0,
'''image_size''': 224,
'''dropout_rate''': 0.2,
'''dw_padding''': [],
},
'''b1''': {
'''hidden_dim''': 1280,
'''width_coef''': 1.0,
'''depth_coef''': 1.1,
'''image_size''': 240,
'''dropout_rate''': 0.2,
'''dw_padding''': [16],
},
'''b2''': {
'''hidden_dim''': 1408,
'''width_coef''': 1.1,
'''depth_coef''': 1.2,
'''image_size''': 260,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 8, 16],
},
'''b3''': {
'''hidden_dim''': 1536,
'''width_coef''': 1.2,
'''depth_coef''': 1.4,
'''image_size''': 300,
'''dropout_rate''': 0.3,
'''dw_padding''': [5, 18],
},
'''b4''': {
'''hidden_dim''': 1792,
'''width_coef''': 1.4,
'''depth_coef''': 1.8,
'''image_size''': 380,
'''dropout_rate''': 0.4,
'''dw_padding''': [6],
},
'''b5''': {
'''hidden_dim''': 2048,
'''width_coef''': 1.6,
'''depth_coef''': 2.2,
'''image_size''': 456,
'''dropout_rate''': 0.4,
'''dw_padding''': [13, 27],
},
'''b6''': {
'''hidden_dim''': 2304,
'''width_coef''': 1.8,
'''depth_coef''': 2.6,
'''image_size''': 528,
'''dropout_rate''': 0.5,
'''dw_padding''': [31],
},
'''b7''': {
'''hidden_dim''': 2560,
'''width_coef''': 2.0,
'''depth_coef''': 3.1,
'''image_size''': 600,
'''dropout_rate''': 0.5,
'''dw_padding''': [18],
},
}
def UpperCamelCase( lowercase_ ) -> Tuple:
'''simple docstring'''
snake_case_ = EfficientNetConfig()
snake_case_ = CONFIG_MAP[model_name]["""hidden_dim"""]
snake_case_ = CONFIG_MAP[model_name]["""width_coef"""]
snake_case_ = CONFIG_MAP[model_name]["""depth_coef"""]
snake_case_ = CONFIG_MAP[model_name]["""image_size"""]
snake_case_ = CONFIG_MAP[model_name]["""dropout_rate"""]
snake_case_ = CONFIG_MAP[model_name]["""dw_padding"""]
snake_case_ = """huggingface/label-files"""
snake_case_ = """imagenet-1k-id2label.json"""
snake_case_ = 1000
snake_case_ = json.load(open(hf_hub_download(lowercase_ , lowercase_ , repo_type="""dataset""" ) , """r""" ) )
snake_case_ = {int(lowercase_ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase( ) -> Tuple:
'''simple docstring'''
snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case_ = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
def UpperCamelCase( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = CONFIG_MAP[model_name]["""image_size"""]
snake_case_ = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=lowercase_ , )
return preprocessor
def UpperCamelCase( lowercase_ ) -> str:
'''simple docstring'''
snake_case_ = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
snake_case_ = sorted(set(lowercase_ ) )
snake_case_ = len(lowercase_ )
snake_case_ = {b: str(lowercase_ ) for b, i in zip(lowercase_ , range(lowercase_ ) )}
snake_case_ = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
snake_case_ = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
snake_case_ = {}
for item in rename_keys:
if item[0] in original_param_names:
snake_case_ = """efficientnet.""" + item[1]
snake_case_ = """classifier.weight"""
snake_case_ = """classifier.bias"""
return key_mapping
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
snake_case_ = key_mapping[key]
if "_conv" in key and "kernel" in key:
snake_case_ = torch.from_numpy(lowercase_ ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
snake_case_ = torch.from_numpy(lowercase_ ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
snake_case_ = torch.from_numpy(np.transpose(lowercase_ ) )
else:
snake_case_ = torch.from_numpy(lowercase_ )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase_ )
@torch.no_grad()
def UpperCamelCase( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
'''simple docstring'''
snake_case_ = model_classes[model_name](
include_top=lowercase_ , weights="""imagenet""" , input_tensor=lowercase_ , input_shape=lowercase_ , pooling=lowercase_ , classes=1000 , classifier_activation="""softmax""" , )
snake_case_ = original_model.trainable_variables
snake_case_ = original_model.non_trainable_variables
snake_case_ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
snake_case_ = param.numpy()
snake_case_ = list(tf_params.keys() )
# Load HuggingFace model
snake_case_ = get_efficientnet_config(lowercase_ )
snake_case_ = EfficientNetForImageClassification(lowercase_ ).eval()
snake_case_ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("""Converting parameters...""" )
snake_case_ = rename_keys(lowercase_ )
replace_params(lowercase_ , lowercase_ , lowercase_ )
# Initialize preprocessor and preprocess input image
snake_case_ = convert_image_processor(lowercase_ )
snake_case_ = preprocessor(images=prepare_img() , return_tensors="""pt""" )
# HF model inference
hf_model.eval()
with torch.no_grad():
snake_case_ = hf_model(**lowercase_ )
snake_case_ = outputs.logits.detach().numpy()
# Original model inference
snake_case_ = False
snake_case_ = CONFIG_MAP[model_name]["""image_size"""]
snake_case_ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
snake_case_ = image.img_to_array(lowercase_ )
snake_case_ = np.expand_dims(lowercase_ , axis=0 )
snake_case_ = original_model.predict(lowercase_ )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase_ , lowercase_ , atol=1e-3 ), "The predicted logits are not the same."
print("""Model outputs match!""" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase_ ):
os.mkdir(lowercase_ )
# Save converted model and image processor
hf_model.save_pretrained(lowercase_ )
preprocessor.save_pretrained(lowercase_ )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
snake_case_ = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(lowercase_ )
hf_model.push_to_hub(lowercase_ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''b0''',
type=str,
help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''hf_model''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''')
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
lowerCamelCase_ = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 34 | 1 |
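# Programmatic use of the converter above, equivalent to the CLI entry point.
# A sketch: it assumes TensorFlow, the Keras EfficientNet weights, and hub
# access are available, and that the obfuscated definitions above correspond
# to convert_efficientnet_checkpoint(model_name, out_dir, save_model, push_to_hub).
convert_efficientnet_checkpoint(
    "b0",        # smallest variant; see CONFIG_MAP for b0-b7
    "hf_model",  # output directory for the converted checkpoint
    True,        # save_model: write model + preprocessor locally
    False,       # push_to_hub: skip uploading
)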
import socket
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
snake_case_ = socket.gethostname()
snake_case_ = 12312
sock.connect((host, port) )
sock.send(b'Hello server!' )
with open('Received_file' , 'wb' ) as out_file:
print('File opened' )
print('Receiving data...' )
while True:
snake_case_ = sock.recv(1024 )
if not data:
break
out_file.write(UpperCamelCase__ )
print('Successfully received the file' )
sock.close()
print('Connection closed' )
if __name__ == "__main__":
main()
| 285 |
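# The client above expects a server on the same host at port 12312 that streams
# a file and then closes the connection (EOF ends the client's recv loop).
# A minimal matching server sketch; the file name is an illustrative assumption:
import socket

def serve_file(path: str = "file_to_send.bin", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print(f"Connection from {addr}")
    conn.recv(1024)  # consume the client's greeting ("Hello server!")
    with open(path, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.sendall(chunk)
    conn.close()  # closing signals EOF to the client
    server.close()

if __name__ == "__main__":
    serve_file()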
class lowercase :
def __init__( self , snake_case , snake_case , snake_case ):
snake_case_ = name
snake_case_ = value
snake_case_ = weight
def __repr__( self ):
return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def a ( self ):
return self.value
def a ( self ):
return self.name
def a ( self ):
return self.weight
def a ( self ):
return self.value / self.weight
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = []
for i in range(len(UpperCamelCase__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = sorted(UpperCamelCase__ , key=UpperCamelCase__ , reverse=UpperCamelCase__ )
snake_case_ = []
snake_case_ , snake_case_ = 0.0, 0.0
for i in range(len(UpperCamelCase__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __lowerCamelCase ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 285 | 1 |
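# The helpers above all share the obfuscated name __lowerCamelCase, so they
# shadow one another; here is a self-contained restatement of the greedy
# selection they implement (descriptive names are replacements, not the
# snippet's): sort items by a key function, then take items while they fit.
from dataclasses import dataclass

@dataclass
class Item:
    name: str
    value: float
    weight: float

def greedy(items, max_cost, key_function):
    result, total_cost, total_value = [], 0.0, 0.0
    for item in sorted(items, key=key_function, reverse=True):
        if total_cost + item.weight <= max_cost:
            result.append(item)
            total_cost += item.weight
            total_value += item.value
    return result, total_value

menu = [Item("burger", 80, 40), Item("pizza", 100, 60), Item("fries", 30, 10)]
chosen, value = greedy(menu, max_cost=70, key_function=lambda i: i.value / i.weight)
print([i.name for i in chosen], value)  # ['fries', 'burger'] 110.0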
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=3 , _a=0.6 , _a=None , ) -> Any:
_A : List[Any] = parent
_A : Dict = batch_size
_A : Optional[int] = image_size
_A : Optional[int] = patch_size
_A : Dict = num_channels
_A : Dict = is_training
_A : Union[str, Any] = use_labels
_A : Union[str, Any] = hidden_size
_A : str = num_hidden_layers
_A : Tuple = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Optional[int] = hidden_act
_A : Union[str, Any] = hidden_dropout_prob
_A : List[str] = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : Optional[Any] = initializer_range
_A : List[Any] = mask_ratio
_A : str = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : List[str] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def a__ ( self ) -> Any:
_A : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : Optional[int] = None
if self.use_labels:
_A : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : int = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> List[Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def a__ ( self , _a , _a , _a ) -> Tuple:
_A : Optional[int] = TFViTMAEModel(config=_a )
_A : Union[str, Any] = model(_a , training=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> Optional[Any]:
_A : Dict = TFViTMAEForPreTraining(_a )
_A : int = model(_a , training=_a )
# expected sequence length = num_patches
_A : Union[str, Any] = (self.image_size // self.patch_size) ** 2
_A : List[str] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_A : int = 1
_A : Optional[Any] = TFViTMAEForPreTraining(_a )
_A : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : Optional[Any] = model(_a , training=_a )
_A : Union[str, Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def a__ ( self ) -> List[Any]:
_A : Optional[int] = self.prepare_config_and_inputs()
((_A) , (_A) , (_A)) : Tuple = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
_a = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> int:
_A : Dict = TFViTMAEModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def a__ ( self ) -> Tuple:
pass
def a__ ( self ) -> Any:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Optional[int] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_A : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , tf.keras.layers.Layer ) )
def a__ ( self ) -> str:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Union[str, Any] = model_class(_a )
_A : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : int = [*signature.parameters.keys()]
_A : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> Optional[Any]:
_A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> str:
_A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def a__ ( self ) -> Optional[Any]:
# make the mask reproducible
np.random.seed(2 )
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
_A : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_A : Optional[int] = model_class(_a )
_A : Dict = self._prepare_for_class(_a , _a )
_A : str = model(_a , noise=_a )
_A : int = copy.deepcopy(self._prepare_for_class(_a , _a ) )
_A : str = model(**_a , noise=_a )
_A : int = outputs_dict[0].numpy()
_A : Any = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def a__ ( self ) -> Union[str, Any]:
# make the mask reproducible
np.random.seed(2 )
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_A : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
_A : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_a ):
_A : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_a ):
_A : List[str] = v.numpy()
else:
_A : Union[str, Any] = np.array(_a )
return inputs_np_dict
for model_class in self.all_model_classes:
_A : Dict = model_class(_a )
_A : int = self._prepare_for_class(_a , _a )
_A : Any = prepare_numpy_arrays(_a )
_A : List[str] = model(_a , noise=_a )
_A : List[Any] = model(**_a , noise=_a )
self.assert_outputs_same(_a , _a )
def a__ ( self , _a , _a , _a ) -> List[str]:
# make masks reproducible
np.random.seed(2 )
_A : Tuple = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_A : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_A : Dict = tf.constant(_a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_A : str = tf_noise
super().check_pt_tf_models(_a , _a , _a )
def a__ ( self ) -> Any:
# make mask reproducible
np.random.seed(2 )
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : str = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_a )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_a , _a ),)
if isinstance(_a , _a )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_a , """_keras_serializable""" , _a )
}
_A : Any = int((config.image_size // config.patch_size) ** 2 )
_A : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_A : str = tf.convert_to_tensor(_a )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
_A : int = main_layer_class(_a )
_A : str = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_A : List[Any] = tf.keras.Model(_a , outputs=main_layer(_a ) )
_A : Any = model(_a )
with tempfile.TemporaryDirectory() as tmpdirname:
_A : List[Any] = os.path.join(_a , """keras_model.h5""" )
model.save(_a )
_A : Tuple = tf.keras.models.load_model(
_a , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_a , tf.keras.Model )
_A : List[Any] = model(_a )
self.assert_outputs_same(_a , _a )
@slow
def a__ ( self ) -> Optional[Any]:
# make mask reproducible
np.random.seed(2 )
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_A : str = int((config.image_size // config.patch_size) ** 2 )
_A : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : List[str] = self._prepare_for_class(_a , _a )
_A : List[str] = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
_A : Tuple = outputs.last_hidden_state.numpy()
_A : Union[str, Any] = 0
else:
_A : Optional[int] = outputs.logits.numpy()
_A : Any = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a , saved_model=_a )
_A : Optional[int] = model_class.from_pretrained(_a )
_A : List[Any] = model(_a , noise=_a )
if model_class.__name__ == "TFViTMAEModel":
_A : int = after_outputs["""last_hidden_state"""].numpy()
_A : str = 0
else:
_A : Any = after_outputs["""logits"""].numpy()
_A : str = 0
_A : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a , 1e-5 )
def a__ ( self ) -> List[str]:
# make mask reproducible
np.random.seed(2 )
_A , _A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_A : int = int((config.image_size // config.patch_size) ** 2 )
_A : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_A : Dict = model_class(_a )
_A : Any = self._prepare_for_class(_a , _a )
_A : int = model(_a , noise=_a )
_A : List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_a )
_A : Dict = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_A : int = model_class.from_config(model.config )
_A : List[str] = new_model(_a ) # Build model
new_model.set_weights(model.get_weights() )
_A : Union[str, Any] = new_model(_a , noise=_a )
self.assert_outputs_same(_a , _a )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def a__ ( self ) -> List[str]:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def a__ ( self ) -> int:
pass
@slow
def a__ ( self ) -> str:
_A : int = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> Tuple:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Tuple:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_A : Union[str, Any] = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
_A : Tuple = self.default_image_processor
_A : Optional[int] = prepare_img()
_A : Tuple = image_processor(images=_a , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_A : Tuple = ViTMAEConfig()
_A : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_A : Tuple = np.random.uniform(size=(1, num_patches) )
# forward pass
_A : int = model(**_a , noise=_a )
# verify the logits
_A : Any = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , _a )
_A : str = tf.convert_to_tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _a , atol=1e-4 )
| 343 |
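# The tester above derives the expected encoder sequence length from the mask
# ratio: ViTMAE keeps a (1 - mask_ratio) fraction of the (num_patches + 1)
# tokens (patches plus [CLS]), rounded up. A quick check of that arithmetic
# with the tester's defaults:
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                      # 15 * 15 = 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(90.4) = 91
print(num_patches, seq_length)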
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowercase :
def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=None , ) -> Union[str, Any]:
_A : Optional[int] = parent
_A : Dict = batch_size
_A : Any = image_size
_A : Optional[int] = patch_size
_A : Optional[int] = num_channels
_A : List[Any] = is_training
_A : Optional[Any] = use_labels
_A : Any = hidden_size
_A : Any = num_hidden_layers
_A : List[Any] = num_attention_heads
_A : int = intermediate_size
_A : Dict = hidden_act
_A : Optional[int] = hidden_dropout_prob
_A : str = attention_probs_dropout_prob
_A : Any = type_sequence_label_size
_A : str = initializer_range
_A : Tuple = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_A : List[Any] = (image_size // patch_size) ** 2
_A : str = num_patches + 1
def a__ ( self ) -> Dict:
_A : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_A : List[str] = None
if self.use_labels:
_A : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A : List[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self ) -> Union[str, Any]:
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def a__ ( self , _a , _a , _a ) -> Dict:
_A : List[str] = ViTMSNModel(config=_a )
model.to(_a )
model.eval()
_A : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , _a , _a , _a ) -> List[str]:
_A : Union[str, Any] = self.type_sequence_label_size
_A : Tuple = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : Optional[int] = model(_a , labels=_a )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_A : Dict = 1
_A : str = ViTMSNForImageClassification(_a )
model.to(_a )
model.eval()
_A : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_A : int = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self ) -> Any:
_A : Optional[int] = self.prepare_config_and_inputs()
_A , _A , _A : Dict = config_and_inputs
_A : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( UpperCamelCase__,UpperCamelCase__,unittest.TestCase ):
_a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_a = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_a = False
_a = False
_a = False
_a = False
def a__ ( self ) -> Tuple:
_A : Tuple = ViTMSNModelTester(self )
_A : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def a__ ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def a__ ( self ) -> int:
pass
def a__ ( self ) -> Any:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : Tuple = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def a__ ( self ) -> str:
_A , _A : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A : int = model_class(_a )
_A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A : str = [*signature.parameters.keys()]
_A : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def a__ ( self ) -> List[Any]:
_A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def a__ ( self ) -> Any:
_A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def a__ ( self ) -> int:
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A : int = ViTMSNModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCAmelCase_ ( ):
_A : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a__ ( self ) -> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def a__ ( self ) -> Optional[int]:
torch.manual_seed(2 )
_A : Tuple = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_a )
_A : Tuple = self.default_image_processor
_A : Dict = prepare_img()
_A : Optional[Any] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
_A : int = model(**_a )
# verify the logits
_A : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _a )
_A : Optional[int] = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 343 | 1 |
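# End-to-end inference with the checkpoint exercised by the slow test above.
# A sketch: it needs network access, the image path is the repo's test
# fixture, and the classification head of this checkpoint may be randomly
# initialized, so the predicted label is only illustrative.
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNForImageClassification

processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])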
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""CLIPFeatureExtractor"""]
UpperCAmelCase = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
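# The file above follows transformers' lazy-import convention: submodules are
# imported only when an attribute is first accessed, while TYPE_CHECKING pulls
# in real symbols for static analysis. A minimal sketch of the same pattern
# for a toy package __init__.py (module and symbol names are illustrative):
import sys
from typing import TYPE_CHECKING

from transformers.utils import _LazyModule

_import_structure = {"configuration_toy": ["ToyConfig"]}

if TYPE_CHECKING:
    from .configuration_toy import ToyConfig  # noqa: F401
else:
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )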
| 256 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCAmelCase = """CompVis/stable-diffusion-v1-1"""
UpperCAmelCase = """CompVis/stable-diffusion-v1-2"""
UpperCAmelCase = """CompVis/stable-diffusion-v1-3"""
UpperCAmelCase = """CompVis/stable-diffusion-v1-4"""
class UpperCAmelCase_ ( _lowercase):
def __init__( self : Tuple , __UpperCamelCase : AutoencoderKL , __UpperCamelCase : CLIPTextModel , __UpperCamelCase : CLIPTokenizer , __UpperCamelCase : UNetaDConditionModel , __UpperCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCamelCase : StableDiffusionSafetyChecker , __UpperCamelCase : CLIPImageProcessor , __UpperCamelCase : bool = True , ) -> int:
        super().__init__()
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase )
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase )
_UpperCamelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase )
_UpperCamelCase = StableDiffusionPipeline(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=__UpperCamelCase , requires_safety_checker=__UpperCamelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def _UpperCamelCase ( self : List[Any] ) -> Dict[str, Any]:
return {k: getattr(self , __UpperCamelCase ) for k in self.config.keys() if not k.startswith('''_''' )}
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : Optional[Union[str, int]] = "auto" ) -> str:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def _UpperCamelCase ( self : int ) -> Tuple:
self.enable_attention_slicing(__UpperCamelCase )
@torch.no_grad()
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Dict , ) -> int:
return self.pipea(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : List[Any] , ) -> int:
return self.pipea(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Any , ) -> Any:
return self.pipea(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self : Any , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : Optional[Any] , ) -> Union[str, Any]:
return self.pipea(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
@torch.no_grad()
def _UpperCamelCase ( self : Any , __UpperCamelCase : Union[str, List[str]] , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 512 , __UpperCamelCase : int = 50 , __UpperCamelCase : float = 7.5 , __UpperCamelCase : Optional[Union[str, List[str]]] = None , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : Optional[torch.Generator] = None , __UpperCamelCase : Optional[torch.FloatTensor] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCamelCase : int = 1 , **__UpperCamelCase : List[str] , ) -> Optional[Any]:
_UpperCamelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__UpperCamelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
_UpperCamelCase = self.textaimg_sda_a(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
_UpperCamelCase = self.textaimg_sda_a(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
_UpperCamelCase = self.textaimg_sda_a(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
_UpperCamelCase = self.textaimg_sda_a(
prompt=__UpperCamelCase , height=__UpperCamelCase , width=__UpperCamelCase , num_inference_steps=__UpperCamelCase , guidance_scale=__UpperCamelCase , negative_prompt=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , eta=__UpperCamelCase , generator=__UpperCamelCase , latents=__UpperCamelCase , output_type=__UpperCamelCase , return_dict=__UpperCamelCase , callback=__UpperCamelCase , callback_steps=__UpperCamelCase , **__UpperCamelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 256 | 1 |
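# What the pipeline above does internally, written out as a plain driver: the
# same prompt is rendered with each of the four v1.x checkpoints and the
# results are collected side by side. A sketch: it assumes a CUDA device and
# enough memory to hold one pipeline at a time.
import torch
from diffusers import StableDiffusionPipeline

prompt = "an astronaut riding a horse on the moon"
images = []
for ckpt in (
    "CompVis/stable-diffusion-v1-1",
    "CompVis/stable-diffusion-v1-2",
    "CompVis/stable-diffusion-v1-3",
    "CompVis/stable-diffusion-v1-4",
):
    pipe = StableDiffusionPipeline.from_pretrained(ckpt, torch_dtype=torch.float16).to("cuda")
    images.append(pipe(prompt, num_inference_steps=50).images[0])
    del pipe  # free GPU memory before loading the next checkpoint
# images[i] now holds the v1.(i+1) result for the same prompt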
"""simple docstring"""
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of ``torch.distributed``. Only the main worker loads the
    index into (cpu) memory; the other workers send it their queries via gather/scatter.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
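# A minimal sketch of the retrieve() contract above (shapes illustrative; a real call
# needs a configured index and, for the distributed branch, an initialized process group):
# question_hidden_states = np.random.randn(2, 768).astype(np.float32)  # (n_queries, hidden)
# retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)
# retrieved_doc_embeds.shape == (2, 5, 768); doc_ids.shape == (2, 5)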
| 371 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the
    cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler with Heun steps for discrete beta schedules (a second-order sampler in the style
    of Karras et al. 2022, Algorithm 1).
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """Scales the model input by 1 / sqrt(sigma**2 + 1) for algorithm consistency."""
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""

        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
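if __name__ == "__main__":
    # Minimal smoke test of the scheduler above. The "model" is a placeholder epsilon
    # predictor (returns zeros), not a real network; shapes are illustrative only.
    scheduler = HeunDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)

    sample = torch.randn(1, 4, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    print(sample.shape)  # torch.Size([1, 4, 8, 8])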
| 301 | 0 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process using shortest remaining time first."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            waiting_time[short] = finish_time - arrival_time[short] - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Calculate the turn around time of each process."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    """Print the average waiting time and average turn around time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
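# Worked example (hand-traced): with arrival times [0, 1, 2] and burst times
# [3, 1, 2], SRTF preempts P1 for P2 at t=1, giving waiting times [1, 0, 2] and
# turn around times [4, 1, 4].
# calculate_waitingtime([0, 1, 2], [3, 1, 2], 3)  # -> [1, 0, 2]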
| 100 |
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary representation as a "0b"-prefixed string."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
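# Example values (hand-checked): decimal_to_binary(8) -> "0b1000",
# decimal_to_binary(-17) -> "-0b10001", decimal_to_binary(0) -> "0b0".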
| 100 | 1 |
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Finds the rank of a matrix by in-place row reduction."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1

    return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
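# Example values (hand-checked): rank_of_matrix([[1, 2, 3], [4, 5, 6]]) -> 2,
# while rank_of_matrix([[1, 2], [2, 4]]) -> 1 since the second row is a multiple
# of the first. Note the function row-reduces its argument in place.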
| 250 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens


class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba, then refine each chunk with wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and special ids."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
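# A minimal usage sketch (network access and the `openbmb/cpm-ant-10b` checkpoint are
# assumptions, and the optional `jieba` dependency must be installed):
# tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
# ids = tokenizer.encode("今天天气真好!")
# print(tokenizer.decode(ids))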
| 250 | 1 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}


class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
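# A small sketch of round-tripping the config (values and path are illustrative):
# config = TrajectoryTransformerConfig(n_layer=2, n_head=2, n_embd=64)
# config.save_pretrained("./ttm-config")  # writes config.json
# reloaded = TrajectoryTransformerConfig.from_pretrained("./ttm-config")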
| 109 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()

            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 272 | 0 |
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

# keep only the .py files that live under the directories given on the command line
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 357 |
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune hidden/underscore directories and "scripts" from the walk in place
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i):
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
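# Example: for a tree containing "sorts/quick_sort.py", this prints a "## Sorts"
# heading followed by "  * [Quick Sort](sorts/quick_sort.py)"; the bullet
# indentation grows with directory depth.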
| 224 | 0 |
def largest_square_area_in_matrix_top_down_approch(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Largest square of 1s in a 0/1 matrix, plain top-down recursion."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Top-down recursion with memoization."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)

    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up dynamic programming over an (rows+1) x (cols+1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up dynamic programming keeping only two rows of the table."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
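# The call above prints 2: the whole 2x2 all-ones matrix is the largest square,
# and the functions return the side length of that square.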
| 10 |
"""simple docstring"""
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
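# Example (hand-checked): with dataset [[0., 0.], [1., 1.]] and query [[0.9, 1.1]],
# the nearest vector is [1.0, 1.0] at euclidean distance ~0.1414. Note dataset and
# query must share a dtype, or similarity_search raises TypeError.
# similarity_search(np.array([[0.0, 0.0], [1.0, 1.0]]), np.array([[0.9, 1.1]]))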
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 96 | 0 |
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Adds new strings ("0" + curr_string, "1" + curr_string) to the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compresses the given bit string using a Lempel-Ziv style dictionary."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Adds the original file size (Elias-gamma-style encoded) in front of the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given bit string, padded to whole bytes, to the given file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Reads the source file, compresses it and writes the result to the destination file."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
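# Example invocation (file names illustrative):
#   python lempel_ziv.py input.bin output.lz
# The compressed stream is prefixed with the original file size (leading zeros,
# then the size in binary) so a matching decompressor can strip the byte padding
# added by write_file_binary().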
| 254 |
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validates the initial digits of a given credit card number."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Runs the Luhn checksum over a given credit card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Validates the given credit card number and prints a diagnostic message."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 254 | 1 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE


def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args


def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)

    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples


def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset


def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )

    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)
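# Example invocation (paths and bucket names illustrative):
#   python run_mlm.py --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-model --bfloat16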
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 210 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : str = logging.get_logger(__name__)
__a : int = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Any = '''wavlm'''
def __init__( self , lowerCAmelCase__=32 , lowerCAmelCase__=7_68 , lowerCAmelCase__=12 , lowerCAmelCase__=12 , lowerCAmelCase__=30_72 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__="group" , lowerCAmelCase__="gelu" , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase__=(5, 2, 2, 2, 2, 2, 2) , lowerCAmelCase__=(10, 3, 3, 3, 3, 2, 2) , lowerCAmelCase__=False , lowerCAmelCase__=1_28 , lowerCAmelCase__=16 , lowerCAmelCase__=3_20 , lowerCAmelCase__=8_00 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.05 , lowerCAmelCase__=10 , lowerCAmelCase__=2 , lowerCAmelCase__=0.0 , lowerCAmelCase__=10 , lowerCAmelCase__=3_20 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_00 , lowerCAmelCase__=2_56 , lowerCAmelCase__=2_56 , lowerCAmelCase__=0.1 , lowerCAmelCase__="mean" , lowerCAmelCase__=False , lowerCAmelCase__=False , lowerCAmelCase__=2_56 , lowerCAmelCase__=(5_12, 5_12, 5_12, 5_12, 15_00) , lowerCAmelCase__=(5, 3, 3, 1, 1) , lowerCAmelCase__=(1, 2, 3, 1, 1) , lowerCAmelCase__=5_12 , lowerCAmelCase__=80 , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=3 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ )
__lowercase = hidden_size
__lowercase = feat_extract_norm
__lowercase = feat_extract_activation
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = conv_bias
__lowercase = num_buckets
__lowercase = max_bucket_distance
__lowercase = num_conv_pos_embeddings
__lowercase = num_conv_pos_embedding_groups
__lowercase = len(self.conv_dim )
__lowercase = num_hidden_layers
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = num_attention_heads
__lowercase = hidden_dropout
__lowercase = attention_dropout
__lowercase = activation_dropout
__lowercase = feat_proj_dropout
__lowercase = final_dropout
__lowercase = layerdrop
__lowercase = layer_norm_eps
__lowercase = initializer_range
__lowercase = num_ctc_classes
__lowercase = vocab_size
__lowercase = do_stable_layer_norm
__lowercase = use_weighted_layer_sum
__lowercase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowercase = apply_spec_augment
__lowercase = mask_time_prob
__lowercase = mask_time_length
__lowercase = mask_time_min_masks
__lowercase = mask_feature_prob
__lowercase = mask_feature_length
# parameters for pretraining with codevector quantized representations
__lowercase = num_codevectors_per_group
__lowercase = num_codevector_groups
__lowercase = contrastive_logits_temperature
__lowercase = num_negatives
__lowercase = codevector_dim
__lowercase = proj_codevector_dim
__lowercase = diversity_loss_weight
# ctc loss
__lowercase = ctc_loss_reduction
__lowercase = ctc_zero_infinity
# adapter
__lowercase = add_adapter
__lowercase = adapter_kernel_size
__lowercase = adapter_stride
__lowercase = num_adapter_layers
__lowercase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowercase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = list(lowerCAmelCase__ )
__lowercase = xvector_output_dim
@property
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 210 | 1 |
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum``."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
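# Example values (hand-checked): is_sum_subset([3, 34, 4, 12, 5, 2], 9) -> True
# (4 + 5 == 9), while is_sum_subset([3, 34, 4, 12, 5, 2], 30) -> False.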
| 354 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class VivitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, do_normalize=True,
                 image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size
    def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
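
# A hedged usage sketch (added; not part of the test suite above).  Outside of
# unittest the processor is typically driven like this; the checkpoint name
# "google/vivit-b-16x2-kinetics400" and the 32-frame clip are illustrative
# assumptions only:
#
#   processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
#   video = [np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8) for _ in range(32)]
#   pixel_values = processor(video, return_tensors="pt").pixel_values  # (1, 32, 3, 224, 224)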
| 278 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2_560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
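
# A small illustrative check (added; not in the original file): downstream, the
# feed-forward width of each block is derived as
# intermediate_multiple_size * hidden_size, so the defaults above imply
# 4 * 2560 = 10240.
if __name__ == "__main__":
    config = GPTNeoXJapaneseConfig()
    print(config.intermediate_multiple_size * config.hidden_size)  # 10240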
| 181 |
"""simple docstring"""
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager | 81 | 0 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_inference_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims=128, targets_length=256, max_decoder_noise_time=2000.0, d_model=768,
                 num_layers=12, num_heads=12, d_kv=64, d_ff=2048, dropout_rate=0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)

        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None,
                encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        # two input projections (gate and linear branch), one output projection
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear

        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh approximation of the GELU activation."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM layer: feature-wise linear modulation (scale and shift) from a conditioning embedding."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
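
# A hedged, self-contained sketch (added; not part of the module above) of what
# the FiLM layer computes: the conditioning embedding is projected to a
# per-feature scale and shift, x -> x * (1 + scale) + shift.  Shapes below are
# illustrative only.
if __name__ == "__main__":
    film = TaFiLMLayer(in_features=768 * 4, out_features=768)
    x = torch.randn(2, 16, 768)          # (batch, seq, d_model)
    cond = torch.randn(2, 1, 768 * 4)    # (batch, 1, d_model * 4)
    print(film(x, cond).shape)           # torch.Size([2, 16, 768])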
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
a__ : Any =logging.get_logger(__name__)
a__ : Optional[Any] ={
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=2_048,
        hidden_size=2_048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for attention module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
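
# A quick illustration (added; not in the original file): the default
# `attention_types` value [[["global", "local"], 12]] expands to a 24-entry
# per-layer schedule that alternates global and local attention.
if __name__ == "__main__":
    layers = GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
    print(len(layers), layers[:4])  # 24 ['global', 'local', 'global', 'local']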
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """ONNX-exportable computation of the largest divisor of `seq_length` below `window_size`."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
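
# A small self-check (added for illustration): `custom_unfold` is meant to agree
# with the built-in `torch.Tensor.unfold` it replaces for ONNX export.
if __name__ == "__main__":
    import torch

    t = torch.arange(12).reshape(3, 4)
    assert torch.equal(custom_unfold(t, dimension=1, size=2, step=2), t.unfold(1, 2, 2))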
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 53 |
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """
    Given a *sorted* list of integers, return the indices of the two numbers
    that add up to `target` using the two-pointer technique, or [] if no such
    pair exists.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 306 | 0 |
'''simple docstring'''
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
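
# A pure-Python sketch of the classical sifting step (added for illustration;
# it needs only numpy, not qiskit): measurement bits are kept only where the
# two randomly chosen bases coincide, which happens for roughly half the qubits.
if __name__ == "__main__":
    demo_rng = np.random.default_rng(0)
    a_basis = demo_rng.integers(2, size=16)
    b_basis = demo_rng.integers(2, size=16)
    bits = demo_rng.integers(2, size=16)
    sifted = "".join(str(bit) for bit, x, y in zip(bits, a_basis, b_basis) if x == y)
    print(f"kept {len(sifted)} of 16 bits: {sifted}")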
| 229 |
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 70 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCamelCase_ = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3_072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
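
# A hedged usage sketch (added; not part of the original module): composing the
# text and vision configs and round-tripping through `to_dict`.
if __name__ == "__main__":
    config = GitConfig(vision_config={"hidden_size": 512, "num_hidden_layers": 4})
    assert config.vision_config.hidden_size == 512
    assert config.to_dict()["vision_config"]["num_hidden_layers"] == 4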
| 251 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 353 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
ACCEPTABLE_CHECKPOINTS = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original checkpoint's weights into our VisualBERT structure.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)

    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
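    # Example invocation (added for illustration; the script filename is an
    # assumption, and the checkpoint must be one of ACCEPTABLE_CHECKPOINTS):
    #   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa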
| 319 | 0 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results
        # so all we check for now is that the process didn't fail
        pass

    def run_and_check(self, stage, model, eval_steps=10, distributed=True, fp16=True, quality_checks=True):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir
    def run_trainer(self, stage, model_name, eval_steps=10, num_train_epochs=1, distributed=True, fp16=True):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir
    def get_launcher(self, distributed=False):
        # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
        # - it won't be able to handle that
        # 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with mode gpus because we use very little data)
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 195 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 195 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
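
# A hedged usage sketch (added; the checkpoint name and subfolder layout are
# assumptions for illustration only):
#
#   from diffusers import DDIMScheduler, UNet2DModel
#   unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
#   scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#   out = pipe(init_image, strength=0.6, num_inference_steps=50)
#   out.images[0].save("img2img.png")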
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """
    Return a dictionary of the current worldwide COVID-19 statistics scraped
    from worldometers.info.
    """
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 40 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase = logging.get_logger(__name__)
class DeiTImageProcessor(BaseImageProcessor):
    r"""
    Image processor with DeiT-style defaults (resize to 256, center crop to 224,
    ImageNet-standard normalization).
    """

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = PIL.Image.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
_snake_case = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
UpperCAmelCase , size=(size["""height"""], size["""width"""]) , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
_snake_case = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> List[Any]:
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase=None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image:
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(UpperCAmelCase )
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(UpperCAmelCase , param_name="""crop_size""" )
_snake_case = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
_snake_case = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
_snake_case = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
_snake_case = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase ) | 341 | 0 |
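A usage sketch for the processor class above (the scrambled class name `_lowerCAmelCase` is kept as it appears in the row; the random image is purely illustrative):

import numpy as np
import PIL.Image

processor = _lowerCAmelCase()  # defaults: resize to 256x256, then center-crop to 224x224
image = PIL.Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
batch = processor.preprocess(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): channels-first after the crop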
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for n < 3,317,044,064,679,887,385,961,981."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    # fall back to all known witnesses when n exceeds every deterministic bound
    # (only reachable with allow_probable=True)
    plist = primes
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


def test_miller_rabin() -> None:
    """Check a composite and a prime on either side of each deterministic bound."""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 358 |
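A short usage sketch for miller_rabin() above; all three inputs sit inside the deterministic range, so no probabilistic fallback is needed:

print(miller_rabin(97))         # True: small prime
print(miller_rabin(561))        # False: Carmichael number, correctly rejected
print(miller_rabin(2**61 - 1))  # True: Mersenne prime, still below the deterministic bound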
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to exercise BridgeTowerImageProcessor."""

    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected (height, width) after shortest-edge resizing,
        # capping the longer side at 1333/800 * shortest_edge and rounding both
        # sides down to a multiple of size_divisor.
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 282 | 0 |
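A sketch of running the test case above in isolation, assuming a transformers checkout where the relative imports (test_image_processing_common and friends) resolve:

if __name__ == "__main__":
    import unittest

    suite = unittest.TestLoader().loadTestsFromTestCase(BridgeTowerImageProcessingTest)
    unittest.TextTestRunner(verbosity=2).run(suite)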
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class A(SequenceFeatureExtractor):
    """Extracts padded log-mel spectrogram patches and a matching patch-level mask."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ) -> None:
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 3 |
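A usage sketch for the feature extractor above (the scrambled class name `A` is kept as in the row; the one-second random waveform is purely illustrative):

import numpy as np

extractor = A(spectrogram_length=2048, sampling_rate=44100)
waveform = np.random.randn(44100).astype(np.float32)  # 1 s of mono audio at 44.1 kHz
features = extractor(waveform, sampling_rate=44100, return_tensors="np")
print(features["audio_values"].shape)  # (1, 1, padded_time, 128): batch of padded log-mel features
print(features["audio_mask"].shape)    # (1, num_patches): 1 for real patches, 0 for padding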
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ : int = logging.get_logger(__name__)
lowercase_ : Optional[Any] = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 133 | 0 |
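A brief usage sketch for the two classes above; `RobertaOnnxConfig(config)` follows the upstream OnnxConfig constructor, but treat the exact instantiation as an assumption here:

config = RobertaConfig(vocab_size=50_265, num_hidden_layers=6)
onnx_config = RobertaOnnxConfig(config)
print(config.num_hidden_layers)  # 6
print(dict(onnx_config.inputs))  # {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {...}}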
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 354 |
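A short illustration of what the _LazyModule pattern above buys: the heavy submodules are imported only when an attribute is first accessed, so importing the package stays cheap. This sketch assumes an installed transformers with sentencepiece available:

import importlib

plbart = importlib.import_module("transformers.models.plbart")  # fast: nothing heavy loaded yet
tokenizer_cls = plbart.PLBartTokenizer  # the real tokenization module is imported only here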
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 276 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class __UpperCamelCase(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 105 |
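A usage sketch for the fast tokenizer class above (the scrambled class name is kept as in the row); it assumes network access to the allegro/herbert-base-cased checkpoint:

tok = __UpperCamelCase.from_pretrained("allegro/herbert-base-cased")
ids = tok.build_inputs_with_special_tokens(tok.convert_tokens_to_ids(tok.tokenize("Dzień dobry")))
print(ids[0] == tok.cls_token_id, ids[-1] == tok.sep_token_id)  # True True: <s> ... </s> framing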
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
a : List[str] = logging.get_logger(__name__)
a : List[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a : str = {
'''vocab_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'''
},
'''merges_file''': {
'''allegro/herbert-base-cased''': '''https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'''
},
}
a : Tuple = {'''allegro/herbert-base-cased''': 514}
a : Optional[int] = {}
class __UpperCamelCase ( a__ ):
lowerCamelCase : str =VOCAB_FILES_NAMES
lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Dict =PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] =HerbertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__="</s>" , **lowerCAmelCase__ , ) -> Optional[int]:
super().__init__(
lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.cls_token_id]
a : Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Dict = [self.sep_token_id]
a : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
a : List[str] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
| 105 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 |
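A worked example for resonant_frequency() above. For L = 10 mH and C = 100 nF, L * C = 1e-9, so f = 1 / (2 * pi * sqrt(1e-9)) which is roughly 5032.9 Hz:

label, frequency = resonant_frequency(inductance=10e-3, capacitance=100e-9)
print(f"{label}: {frequency:.1f} Hz")  # Resonant frequency: 5032.9 Hz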
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase(metaclass=DummyObject):
    """Placeholder that raises a helpful error when its optional backends are missing."""

    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 259 | 1 |