import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # Convert the Counter into a dense list indexed by token id.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
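
# Note: integration tests decorated with @slow, like the one above, are skipped
# by default in the transformers test suite and only run when RUN_SLOW is set
# in the environment, e.g. `RUN_SLOW=1 python -m pytest -k DiTIntegrationTest`
# (the -k filter here is an illustrative assumption).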
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
UpperCamelCase = (CMStochasticIterativeScheduler,)
UpperCamelCase = 10
def lowercase__ ( self : Optional[int] , **_UpperCAmelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**_UpperCAmelCase )
return config
def lowercase__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = 10
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = self.scheduler_classes[0](**_UpperCAmelCase )
scheduler.set_timesteps(_UpperCAmelCase )
UpperCAmelCase_ = scheduler.timesteps[0]
UpperCAmelCase_ = scheduler.timesteps[1]
UpperCAmelCase_ = self.dummy_sample
UpperCAmelCase_ = 0.1 * sample
UpperCAmelCase_ = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
UpperCAmelCase_ = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowercase__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_UpperCAmelCase )
UpperCAmelCase_ = 1
scheduler.set_timesteps(_UpperCAmelCase )
UpperCAmelCase_ = scheduler.timesteps
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_UpperCAmelCase ):
# 1. scale model input
UpperCAmelCase_ = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict noise residual
UpperCAmelCase_ = model(_UpperCAmelCase , _UpperCAmelCase )
# 3. predict previous sample x_t-1
UpperCAmelCase_ = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
UpperCAmelCase_ = pred_prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase_ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def lowercase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_UpperCAmelCase )
UpperCAmelCase_ = [106, 0]
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
UpperCAmelCase_ = scheduler.timesteps
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = self.dummy_model()
UpperCAmelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCAmelCase_ = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict noise residual
UpperCAmelCase_ = model(_UpperCAmelCase , _UpperCAmelCase )
# 3. predict previous sample x_t-1
UpperCAmelCase_ = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
UpperCAmelCase_ = pred_prev_sample
UpperCAmelCase_ = torch.sum(torch.abs(_UpperCAmelCase ) )
UpperCAmelCase_ = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def lowercase__ ( self : Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_UpperCAmelCase )
UpperCAmelCase_ = [39, 30, 12, 15, 0]
with self.assertRaises(_UpperCAmelCase , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
def lowercase__ ( self : Any ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_UpperCAmelCase )
UpperCAmelCase_ = [39, 30, 12, 1, 0]
UpperCAmelCase_ = len(_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=_UpperCAmelCase , timesteps=_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.scheduler_classes[0]
UpperCAmelCase_ = self.get_scheduler_config()
UpperCAmelCase_ = scheduler_class(**_UpperCAmelCase )
UpperCAmelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_UpperCAmelCase , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=_UpperCAmelCase )
| 82 |
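
# --- Illustrative usage (not part of the original tests) ---
# A minimal sampling sketch with the scheduler exercised above; the
# zero-residual "model" is a stand-in assumption so the snippet runs without
# a trained consistency network.
import torch

from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(2)
sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)
    residual = torch.zeros_like(scaled)  # stand-in for a trained consistency model
    sample = scheduler.step(residual, t, sample).prev_sample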
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def snake_case_ ( A_ : Tuple, A_ : List[str], A_ : Optional[Any], A_ : Dict, A_ : Dict=True, A_ : int="pt" ):
'''simple docstring'''
_lowerCamelCase : str = {'''add_prefix_space''': True} if isinstance(A_, A_ ) and not line.startswith(''' ''' ) else {}
_lowerCamelCase : Union[str, Any] = padding_side
return tokenizer(
[line], max_length=A_, padding='''max_length''' if pad_to_max_length else None, truncation=A_, return_tensors=A_, add_special_tokens=A_, **A_, )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
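
# --- Illustrative check (not part of the original file) ---
# trim_batch drops columns that contain only padding; a quick sanity check:
#   >>> import torch
#   >>> ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#   >>> trim_batch(ids, pad_token_id=0)
#   tensor([[5, 6],
#           [7, 0]])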
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to output_dir/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
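
# --- Illustrative check (not part of the original file) ---
# f1_score works on normalized token overlap; for example
# f1_score("The cat sat", "a cat sat down") compares "cat sat" against
# "cat sat down": 2 shared tokens give precision 1.0 and recall 2/3,
# so the returned F1 is 0.8.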
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    # A valid IPv4 address has exactly four octets, each in the range 0-255.
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
import unittest

import numpy as np


def schur_complement(mat_a: np.ndarray, mat_b: np.ndarray, mat_c: np.ndarray, pseudo_inv=None) -> np.ndarray:
    """
    Computes the Schur complement of a symmetric block matrix [[A, B], [B.T, C]],
    i.e. C - B.T @ A^{-1} @ B. A precomputed (pseudo-)inverse of A can be supplied
    via `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
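
# --- Illustrative check (not part of the original file) ---
# For X = [[A, B], [B.T, C]], det(X) = det(A) * det(S) with S the Schur
# complement; a quick numeric spot check:
#   a = np.array([[2.0, 0.0], [0.0, 2.0]]); b = np.array([[1.0], [0.0]]); c = np.array([[3.0]])
#   schur_complement(a, b, c)  # -> array([[2.5]]), since 3 - 1/2 = 2.5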
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        # Passing C in B's place makes the row counts of A and B disagree.
        with self.assertRaises(ValueError):
            schur_complement(a, c, b)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data

from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])

    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
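
# --- Illustrative launch (not part of the original script) ---
# The script above reads RANK and WORLD_SIZE from the environment, which
# torchrun provides. Assuming the file is saved as run_torch_distributed.py:
#   torchrun --nproc_per_node=2 run_torch_distributed.py --num_workers 2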
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes data according to RFC4648, returning the Base64 bytes."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes Base64-encoded data (a str or ASCII bytes) back to raw bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(char in B64_CHARSET for char in encoded_data), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data)

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
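
# --- Illustrative check (not part of the original file) ---
# The implementation above can be spot-checked against the standard library:
import base64 as _stdlib_base64

assert base64_encode(b"Hello World!") == _stdlib_base64.b64encode(b"Hello World!")
assert base64_decode("SGVsbG8gV29ybGQh") == b"Hello World!"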
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text in `filename` between `start_prompt` and `end_prompt`, trimming empty lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the markdown list of models supporting a given task guide."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check that the model list in a task guide is up to date, optionally fixing it in place."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path

import pytest

import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from functools import reduce
__a = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution ( _lowercase = __a ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , _lowercase[i : i + 13] ) )
        for i in range(len(_lowercase ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 0 |
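# The solution above slides a 13-digit window over the string and multiplies
# the digits via reduce. The same computation as an explicit loop, with a
# small self-check (the helper name is illustrative):
def greatest_product(digits, window=13):
    best = 0
    for i in range(len(digits) - window + 1):
        product = 1
        for ch in digits[i : i + window]:
            product *= int(ch)
        best = max(best, product)
    return best


assert greatest_product("123456", window=2) == 30  # 5 * 6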
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = Dict[str, Any]
UpperCAmelCase = List[Prediction]
@add_end_docstrings(A_ )
class lowercase__ ( A_ ):
def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> int:
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
if self.framework == "tf":
raise ValueError(F'The {self.__class__} is only available in PyTorch.')
requires_backends(self , """vision""")
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))
def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> Optional[Any]:
_lowerCamelCase : int = {}
if "threshold" in kwargs:
_lowerCamelCase : List[Any] = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> Union[Predictions, List[Prediction]]:
return super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Optional[Any]:
_lowerCamelCase : int = load_image(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Tuple = torch.IntTensor([[image.height, image.width]])
_lowerCamelCase : Dict = self.image_processor(images=[image] , return_tensors="""pt""")
if self.tokenizer is not None:
_lowerCamelCase : Union[str, Any] = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""")
_lowerCamelCase : Optional[Any] = target_size
return inputs
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]:
_lowerCamelCase : int = model_inputs.pop("""target_size""")
_lowerCamelCase : Optional[Any] = self.model(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : Tuple = outputs.__class__({"""target_size""": target_size, **outputs})
if self.tokenizer is not None:
_lowerCamelCase : Union[str, Any] = model_inputs["""bbox"""]
return model_outputs
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0.9) -> List[str]:
_lowerCamelCase : Tuple = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = target_size[0].tolist()
def unnormalize(SCREAMING_SNAKE_CASE):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
]))
_lowerCamelCase , _lowerCamelCase : List[Any] = model_outputs["""logits"""].squeeze(0).softmax(dim=-1).max(dim=-1)
_lowerCamelCase : int = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_lowerCamelCase : Any = [unnormalize(SCREAMING_SNAKE_CASE) for bbox in model_outputs["""bbox"""].squeeze(0)]
_lowerCamelCase : str = ["""score""", """label""", """box"""]
_lowerCamelCase : Optional[Any] = [dict(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) for vals in zip(scores.tolist() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_lowerCamelCase : Union[str, Any] = self.image_processor.post_process_object_detection(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = raw_annotations[0]
_lowerCamelCase : Tuple = raw_annotation["""scores"""]
_lowerCamelCase : int = raw_annotation["""labels"""]
_lowerCamelCase : Any = raw_annotation["""boxes"""]
_lowerCamelCase : str = scores.tolist()
_lowerCamelCase : Any = [self.model.config.idalabel[label.item()] for label in labels]
_lowerCamelCase : List[str] = [self._get_bounding_box(SCREAMING_SNAKE_CASE) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_lowerCamelCase : Optional[int] = ["""score""", """label""", """box"""]
_lowerCamelCase : Any = [
dict(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE))
for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""])
]
return annotation
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""")
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Tuple = box.int().tolist()
_lowerCamelCase : List[Any] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
| 88 |
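# In the LayoutLM branch above, token boxes live on a 0-1000 grid and are
# rescaled to pixel coordinates before being returned. The arithmetic in
# isolation (function name is illustrative):
def unnormalize_box(bbox, width, height):
    return [
        width * bbox[0] / 1000,
        height * bbox[1] / 1000,
        width * bbox[2] / 1000,
        height * bbox[3] / 1000,
    ]


# a box covering the right half of an 800x600 page
assert unnormalize_box([500, 0, 1000, 1000], 800, 600) == [400.0, 0.0, 800.0, 600.0]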
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi ( precision ):
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
    print(F"""The first {n} digits of pi is: {pi(n)}""") | 30 | 0 |
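# Each Chudnovsky term contributes roughly 14 digits, which is why the loop
# above runs ceil(precision / 14) iterations. A quick cross-check of the
# pi() routine against the float value of pi (agreement is only expected up
# to float precision, about 15-16 digits):
import math

assert pi(20).startswith("3.14159265358979")
assert abs(float(pi(20)) - math.pi) < 1e-14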
def factorial ( digit ) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy ( number ) -> bool:
    fact_sum : int = 0
    duplicate : int = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        F"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
)
| 89 |
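# Worked check of the definition: 145 = 1! + 4! + 5! = 1 + 24 + 120, while
# 144 fails the test.
assert krishnamurthy(145)
assert krishnamurthy(40585)  # = 4! + 0! + 5! + 8! + 5!
assert not krishnamurthy(144)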
from __future__ import annotations
import math
__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad ( x , y , z , scale , distance ):
    '''simple docstring'''
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'''Input values must either be float or int: {list(locals().values() )}'''
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate ( x , y , z , axis , angle ):
    '''simple docstring'''
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'''{list(input_variables.values() )}'''
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 0 |
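# Property check for rotate(): rotation about an axis leaves that coordinate
# unchanged, whatever the (nonstandard) angle normalization above works out to.
_x, _y, _z = rotate(1.0, 2.0, 3.0, "z", 45.0)
assert _z == 3.0
_x, _y, _z = rotate(1.0, 2.0, 3.0, "x", 45.0)
assert _x == 1.0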
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = XLNetTokenizer
lowercase__ : int = XLNetTokenizerFast
lowercase__ : Optional[int] = True
lowercase__ : Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = '''<s>'''
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<eod>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_06 )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = XLNetTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> str:
lowerCAmelCase__ = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' )
lowerCAmelCase__ = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Any:
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # 
# noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , ) | 90 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__a = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__a = concatenate_datasets
__a = DownloadConfig
__a = DownloadManager
__a = DownloadMode
__a = DownloadConfig
__a = DownloadMode
__a = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 30 | 0 |
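# Typical use of the public API re-exported above; the dataset name and
# network access are assumptions for illustration only.
from datasets import concatenate_datasets, load_dataset

ds = load_dataset("rotten_tomatoes", split="train")
print(ds[0])  # one example as a plain dict
doubled = concatenate_datasets([ds, ds])
assert len(doubled) == 2 * len(ds)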
"""simple docstring"""
def _snake_case ( number_of_steps : int ):
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), F'number_of_steps needs to be positive integer, your input {number_of_steps}'
    if number_of_steps == 1:
        return 1
    previous , current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 |
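# The loop above is the Fibonacci recurrence: the number of ways to climb n
# steps taking strides of 1 or 2 is the (n+1)-th Fibonacci number.
assert [_snake_case(n) for n in range(1, 7)] == [1, 2, 3, 5, 8, 13]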
def gcd ( a , b ):
    '''simple docstring'''
    while a != 0:
        a , b = b % a, a
    return b
def find_mod_inverse ( a , m ):
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    ua , ub , uc = 1, 0, a
    va , vb , vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va , vb , vc , ua , ub , uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m | 30 | 0 |
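# 7 * 5 = 35 ≡ 1 (mod 17), so 5 is the modular inverse of 7 mod 17. Since
# Python 3.8 the same value is also available from the pow() builtin.
assert find_mod_inverse(7, 17) == 5
assert (7 * find_mod_inverse(7, 17)) % 17 == 1
assert pow(7, -1, 17) == 5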
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
UpperCamelCase_ = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _lowerCAmelCase ( __magic_name__ : int ) -> Tuple:
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def _lowerCAmelCase ( __magic_name__ : int ) -> Any:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Any ) -> Any:
from transformers.testing_utils import pytest_terminal_summary_main
lowercase : Optional[Any] =terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(__magic_name__ , id=__magic_name__ )
def _lowerCAmelCase ( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]:
# If no tests are collected, pytest exists with code 5, which makes the CI fail.
if exitstatus == 5:
lowercase : Optional[int] =0
# Doctest custom flag to ignore output.
UpperCamelCase_ = doctest.register_optionflag("""IGNORE_RESULT""")
UpperCamelCase_ = doctest.OutputChecker
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_ = CustomOutputChecker
UpperCamelCase_ = HfDoctestModule
UpperCamelCase_ = HfDocTestParser
| 92 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
lowerCAmelCase = '''image_segmenter'''
lowerCAmelCase = CLIPSegForImageSegmentation
lowerCAmelCase = ['''image''', '''text''']
lowerCAmelCase = ['''image''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self ,['''vision'''] )
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.pre_processor(text=[label] ,images=[image] ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
with torch.no_grad():
UpperCAmelCase_ : Dict = self.model(**_SCREAMING_SNAKE_CASE ).logits
return logits
    def a__ ( self ,outputs ) -> Dict:
        array = outputs.cpu().detach().numpy()
        # binarize the segmentation logits at zero before scaling to 0-255
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8 ) ) | 30 | 0 |
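# The same checkpoint can be driven without the tool wrapper. A sketch using
# the public transformers CLIPSeg classes; network access and the local file
# name "photo.jpg" are assumptions for illustration.
import torch
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("photo.jpg")
inputs = processor(text=["a cat"], images=[image], padding=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# binarize at zero, mirroring the decode step above
mask = Image.fromarray((logits > 0).squeeze().numpy().astype("uint8") * 255)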
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__A = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
__A = """hopper-medium-v2"""
__A = gym.make(env_name)
__A = ValueGuidedRLPipeline.from_pretrained(
"""bglick13/hopper-medium-v2-value-function-hor32""",
env=env,
)
env.seed(0)
__A = env.reset()
__A = 0
__A = 0
__A = 1000
__A = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__A = pipeline(obs, planning_horizon=32)
# execute action in environment
__A , __A , __A , __A = env.step(denorm_actions)
__A = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
F''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
__A = next_observation
except KeyboardInterrupt:
pass
print(F'''Total reward: {total_reward}''')
| 93 |
import numpy as np
import datasets
__a = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ),
} ) ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
# convert to numpy arrays
UpperCAmelCase_ : str = np.array(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
UpperCAmelCase_ : List[str] = X - np.mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.cov(reference_distribution.T )
try:
UpperCAmelCase_ : Any = np.linalg.inv(_SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
UpperCAmelCase_ : List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.dot(_SCREAMING_SNAKE_CASE ,X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 30 | 0 |
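# The core formula used above, written out in plain NumPy: delta . Cov^{-1}
# . delta^T for each row of X against a reference sample. Note the metric
# returns the squared distance (no square root); toy data for illustration.
import numpy as np

reference = np.random.default_rng(0).normal(size=(1000, 2))
X = np.array([[0.0, 1.0], [2.0, 2.0]])
delta = X - reference.mean(axis=0)
cov_inv = np.linalg.inv(np.cov(reference.T))
distances = (delta @ cov_inv @ delta.T).diagonal()
print(distances)  # one squared Mahalanobis distance per row of X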
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
SCREAMING_SNAKE_CASE = True
except (ImportError, ModuleNotFoundError):
SCREAMING_SNAKE_CASE = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowercase_ ( __A : str ) -> str:
"""simple docstring"""
    __A = re.sub('''<n>''' , '''''' , __A )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__A ) )
| 94 |
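# The helper above relies on nltk's punkt model; a standalone run of the
# sentence splitter it wraps:
import nltk

nltk.download("punkt", quiet=True)
print(nltk.sent_tokenize("First sentence. Second one? Third!"))
# ['First sentence.', 'Second one?', 'Third!']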
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__a = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(_a )
class __a:
"""simple docstring"""
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
elif titles is None or texts is None:
UpperCAmelCase_ : List[str] = titles if texts is None else texts
return super().__call__(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles]
UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts]
UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f'''There should be as many titles than texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' )
UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
]
}
if return_attention_mask is not False:
UpperCAmelCase_ : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase_ : Dict = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = reader_input['''input_ids''']
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3]
UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ )
UpperCAmelCase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = []
for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
UpperCAmelCase_ : str = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class __a( _a , _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = ['''input_ids''', '''attention_mask'''] | 30 | 0 |
"""simple docstring"""
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ :
__magic_name__ = 42
__magic_name__ = None
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
raise NotImplementedError
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]:
raise NotImplementedError
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> str:
raise NotImplementedError
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
if not self.is_available():
raise RuntimeError(
f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ) -> List[str]:
return f"""`pip install {cls.pip_package or cls.name}`"""
class UpperCamelCase_ (__A ):
__magic_name__ = '''optuna'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> List[str]:
return is_optuna_available()
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : Tuple ) -> int:
return run_hp_search_optuna(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
return default_hp_space_optuna(lowerCAmelCase_ )
class UpperCamelCase_ (__A ):
__magic_name__ = '''ray'''
__magic_name__ = '''\'ray[tune]\''''
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
return is_ray_available()
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : Tuple ) -> List[str]:
return run_hp_search_ray(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Tuple ) -> Optional[int]:
return default_hp_space_ray(lowerCAmelCase_ )
class UpperCamelCase_ (__A ):
__magic_name__ = '''sigopt'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
return is_sigopt_available()
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : str ) -> str:
return run_hp_search_sigopt(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Any ) -> Any:
return default_hp_space_sigopt(lowerCAmelCase_ )
class UpperCamelCase_ (__A ):
__magic_name__ = '''wandb'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( ) -> int:
return is_wandb_available()
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
return run_hp_search_wandb(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : List[str] ) -> Any:
return default_hp_space_wandb(lowerCAmelCase_ )
lowerCamelCase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def snake_case ( ):
UpperCAmelCase_ : List[str] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(A__ ) > 0:
UpperCAmelCase_ : Optional[Any] = available_backends[0].name
if len(A__ ) > 1:
logger.info(
F"""{len(A__ )} hyperparameter search backends available. Using {name} as the default.""" )
return name
raise RuntimeError(
"No hyperparameter search backend available.\n"
+ "\n".join(
F""" - To install {backend.name} run {backend.pip_install()}"""
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 95 |
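# The fallback logic above is a registry-plus-probe pattern: collect every
# backend whose availability probe passes and take the first. A generic
# standalone version (class and function names are illustrative):
class _Backend:
    name = "base"

    @staticmethod
    def is_available():
        return False


class _AlwaysOn(_Backend):
    name = "always_on"

    @staticmethod
    def is_available():
        return True


_REGISTRY = [_Backend, _AlwaysOn]


def pick_backend():
    available = [backend for backend in _REGISTRY if backend.is_available()]
    if not available:
        raise RuntimeError("No backend available.")
    return available[0].name


assert pick_backend() == "always_on"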
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
__lowerCamelCase = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
__lowerCamelCase = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
__lowerCamelCase = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def lowerCamelCase__ ( self : Tuple ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def lowerCamelCase__ ( self : List[str] , __snake_case : List[str] , __snake_case : Dict ) -> List[str]:
__magic_name__: Union[str, Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
__magic_name__: List[str] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
__magic_name__: Tuple = evaluate(dataset=__snake_case , predictions=__snake_case )
return score
| 96 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """Configuration class to store the configuration of a Wav2Vec2 model."""

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1,
        attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1,
        layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group",
        feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False,
        num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1,
        num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="sum", ctc_zero_infinity=False, use_weighted_layer_sum=False,
        classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, pad_token_id=0, bos_token_id=1,
        eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2,
        num_adapter_layers=3, output_hidden_size=None, adapter_attn_dim=None, **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul ,self.conv_stride ,1 ) | 30 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :Any = ['pixel_values']
def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> None:
super().__init__(**SCREAMING_SNAKE_CASE_ )
lowercase_ = size if size is not None else {'''shortest_edge''': 2_5_6}
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
lowercase_ = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = do_resize
lowercase_ = size
lowercase_ = resample
lowercase_ = do_center_crop
lowercase_ = crop_size
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_normalize
lowercase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[str] , ) -> np.ndarray:
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
lowercase_ = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['''shortest_edge'''] , default_to_square=SCREAMING_SNAKE_CASE_ )
return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Tuple , ) -> np.ndarray:
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['''height'''], size['''width''']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> np.ndarray:
return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Any , ) -> np.ndarray:
return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> str:
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ )
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ = crop_size if crop_size is not None else self.crop_size
lowercase_ = get_size_dict(SCREAMING_SNAKE_CASE_ )
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = image_mean if image_mean is not None else self.image_mean
lowercase_ = image_std if image_std is not None else self.image_std
lowercase_ = make_list_of_images(SCREAMING_SNAKE_CASE_ )
if not valid_images(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
if do_resize:
lowercase_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_center_crop:
lowercase_ = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
if do_normalize:
lowercase_ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
lowercase_ = {'''pixel_values''': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
| 97 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 0 |
"""Apply X (NOT) gates to two qubits and measure them on a simulator."""
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip qubits 0 and 1 with X gates, measure them into the classical bits,
    and return the measurement counts over 1000 shots."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
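# Expected behaviour (illustrative): the two X gates flip both qubits
# deterministically, so every shot measures the state "11" and the printed
# counts should be {'11': 1000} for the 1000 shots configured above.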
| 98 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each task prompt `n_copies` times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task: one task is repeated `batch_size`
    times per generate call, and results are gathered across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 30 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = ["""pixel_values"""]
def __init__( self , __A = True , __A = None , __A = PILImageResampling.BILINEAR , __A = True , __A = None , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , **__A , ):
super().__init__(**__A )
__a = size if size is not None else {"""shortest_edge""": 256}
__a = get_size_dict(__A , default_to_square=__A )
__a = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__a = get_size_dict(__A )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__a = image_std if image_std is not None else IMAGENET_STANDARD_STD
def snake_case_ ( self , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ):
__a = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__a = get_resize_output_image_size(__A , size=size["""shortest_edge"""] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def snake_case_ ( self , __A , __A , __A = None , **__A , ):
__a = get_size_dict(__A )
return center_crop(__A , size=(size["""height"""], size["""width"""]) , data_format=__A , **__A )
def snake_case_ ( self , __A , __A , __A = None , **__A ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def snake_case_ ( self , __A , __A , __A , __A = None , **__A , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def snake_case_ ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ):
__a = do_resize if do_resize is not None else self.do_resize
__a = size if size is not None else self.size
__a = get_size_dict(__A , default_to_square=__A )
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(__A )
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__a = [to_numpy_array(__A ) for image in images]
if do_resize:
__a = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
__a = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
__a = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
__a = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
__a = [to_channel_dimension_format(__A , __A ) for image in images]
__a = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
| 99 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)
IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sequence token
        n_positions=32 * 32,  # images are 32 x 32 pixels
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy pixel inputs to provide to the ONNX exporter."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
return inputs | 30 | 0 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
{"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
{"""dataset""": """snli""", """config_name""": """plain_text"""},
{"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
{"""dataset""": """wiki40b""", """config_name""": """en"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
{"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
{"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
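# For example, with the table above this yields parameter dicts such as
# {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"},
# one per entry in DATASETS_ON_HF_GCP.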
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 100 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Map a Megatron-DeepSpeed parameter name onto the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
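# For example, str(torch.float16) is "torch.float16", so the regex captures
# "16" and get_dtype_size(torch.float16) == 2 bytes, while torch.bool is
# special-cased to 1/8 of a byte (one bit per element).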
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if bloom_config_file == "":
UpperCAmelCase_ : Tuple = BloomConfig()
else:
UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase )
if shard_model:
UpperCAmelCase_ : Any = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('''Processing file: {}'''.format(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : Dict = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
UpperCAmelCase_ : List[Any] = BloomConfig()
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : List[str] = total_size
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
UpperCAmelCase_ : Any = BloomModel(_lowercase )
UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = None
for i, file in enumerate(_lowercase ):
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : str = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Dict = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 101 |
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000
    (Project Euler problem 48)."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
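# An equivalent constant-memory variant keeps only the last ten digits while
# summing, using Python's three-argument pow for modular exponentiation:
#
#     sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10
#
# This returns the same last ten digits as an integer.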
if __name__ == "__main__":
print(solution()) | 30 | 0 |
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
    """A fully connected network with two hidden layers, trained with plain
    gradient descent on the squared error."""
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array

        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.

        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )

        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)

        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)

        # Real output values provided.
        self.output_array = output_array

        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        # layer_between_input_and_first_hidden_layer is the layer connecting
        # the input nodes with the first hidden set of nodes.
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )

        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )

        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            # Update the prediction before back-propagating, since the weight
            # updates are computed from self.predicted_output.
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the prediction is to be made.
        self.array = input_arr

        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )

        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    # `value` is assumed to already be an activated (sigmoid) output.
    return (value) * (1 - (value))
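# Quick sanity check for the activation helpers (illustrative): sigmoid(0) is
# exactly 0.5, and since sigmoid_derivative takes an already-activated value,
# sigmoid_derivative(sigmoid(0)) == 0.5 * (1 - 0.5) == 0.25.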
def example() -> int:
    # Input values (all 3-bit binary patterns).
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )

    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)

    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=output)

    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)

    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 102 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "PIL.Image.Image"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
def __call__( self ) -> Tuple:
return self.pa_type
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Optional[Any] = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase_ : Dict = storage.field('''bytes''' )
else:
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase_ : int = storage.field('''path''' )
else:
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : Any = f.read()
return bytes_
UpperCAmelCase_ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : List[str] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using its native compression if
    possible, otherwise fall back to PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
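# For example, an image loaded from a JPEG file keeps image.format == "JPEG"
# and is re-encoded as JPEG, while an in-memory RGB image (format is None)
# falls back to lossless PNG, and modes outside the PNG list (e.g. "I;16")
# fall back to TIFF.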
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs: list) -> list:
    """Encode a list of objects into a format suitable for creating an extension
    array of image dicts with "path" and "bytes" fields."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
return objs | 30 | 0 |
"""simple docstring"""
import operator
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = None ) -> list:
_snake_case = operator.lt if reverse else operator.gt
_snake_case = solution or []
if not arr:
return solution
_snake_case = [arr.pop(0 )]
for i, item in enumerate(lowerCAmelCase_ ):
if _operator(lowerCAmelCase_ , sublist[-1] ):
sublist.append(lowerCAmelCase_ )
arr.pop(lowerCAmelCase_ )
# merging sublist into solution list
if not solution:
solution.extend(lowerCAmelCase_ )
else:
while sublist:
_snake_case = sublist.pop(0 )
for i, xx in enumerate(lowerCAmelCase_ ):
if not _operator(lowerCAmelCase_ , lowerCAmelCase_ ):
solution.insert(lowerCAmelCase_ , lowerCAmelCase_ )
break
else:
solution.append(lowerCAmelCase_ )
strand_sort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
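    # Worked trace of the strand-sort algorithm the first assertion exercises,
    # on [4, 3, 5, 1, 2]: pass 1 pulls the increasing strand [4, 5] out of the
    # input (leaving [3, 1, 2]) and makes it the solution; pass 2 pulls [3]
    # and merges it to get [3, 4, 5]; pass 3 pulls [1, 2] and merges to reach
    # [1, 2, 3, 4, 5]. Each pass is one recursive call, so the recursion depth
    # equals the number of strands.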
| 103 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' )
UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.logits
UpperCAmelCase_ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,)
self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 104 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) | 30 | 0 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=2 ,snake_case__=56 ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=True ,snake_case__=99 ,snake_case__=32 ,snake_case__=2 ,snake_case__=2 ,snake_case__=7 ,snake_case__="gelu_new" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=16 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=4 ,snake_case__="block_sparse" ,snake_case__=True ,snake_case__=False ,snake_case__=2 ,snake_case__=3 ,):
SCREAMING_SNAKE_CASE_ : Tuple = parent
SCREAMING_SNAKE_CASE_ : int = batch_size
SCREAMING_SNAKE_CASE_ : str = seq_length
SCREAMING_SNAKE_CASE_ : Tuple = is_training
SCREAMING_SNAKE_CASE_ : int = use_attention_mask
SCREAMING_SNAKE_CASE_ : int = use_token_type_ids
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : str = vocab_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE_ : int = type_vocab_size
SCREAMING_SNAKE_CASE_ : Tuple = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = num_choices
SCREAMING_SNAKE_CASE_ : Any = rescale_embeddings
SCREAMING_SNAKE_CASE_ : int = attention_type
SCREAMING_SNAKE_CASE_ : Any = use_bias
SCREAMING_SNAKE_CASE_ : Optional[int] = block_size
SCREAMING_SNAKE_CASE_ : Dict = num_random_blocks
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE_ : int = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : int = BigBirdConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=snake_case__ ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,block_size=self.block_size ,num_random_blocks=self.num_random_blocks ,use_bias=self.use_bias ,rescale_embeddings=self.rescale_embeddings ,)
return config, input_ids, token_type_ids, attention_mask
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE_ : int = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'attention_mask': attention_mask,
}
return config, inputs_dict
@require_flax
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : int = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
__a : List[Any] = False
__a : Any = False
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case ( self ):
super().test_hidden_states_output()
@slow
def snake_case ( self ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class_name.from_pretrained('google/bigbird-roberta-base' )
self.assertIsNotNone(snake_case__ )
def snake_case ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE_ : str = self._prepare_for_class(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = model_class(snake_case__ )
@jax.jit
def model_jitted(snake_case__ ,snake_case__=None ,**snake_case__ ):
return model(input_ids=snake_case__ ,attention_mask=snake_case__ ,**snake_case__ )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = model_jitted(**snake_case__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE_ : Any = model_jitted(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) ,len(snake_case__ ) )
for jitted_output, output in zip(snake_case__ ,snake_case__ ):
self.assertEqual(jitted_output.shape ,output.shape )
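    # Note that the JIT/no-JIT comparison above asserts only on output
    # *shapes*: under `jax.disable_jit()` every op runs eagerly, so matching
    # lengths and shapes confirm the traced and eager paths agree structurally
    # without requiring numerical equality.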
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=1E-5 ,snake_case__="outputs" ,snake_case__=None ):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in the PyTorch
        # version an effort was made to return `attention_probs` (yet to be verified).
if name.startswith('outputs.attentions' ):
return
else:
super().check_pt_flax_outputs(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
| 105 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]:
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30}
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : List[str] = crop_pct
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Any = do_normalize
UpperCAmelCase_ : str = image_mean
UpperCAmelCase_ : List[Any] = image_std
def a__ ( self ) -> str:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self )
@property
def a__ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} )
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,) | 30 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__snake_case :List[str] ='\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
__snake_case :Union[str, Any] ='\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
__snake_case :Optional[Any] ='\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase__ ( datasets.Metric ):
def __UpperCamelCase ( self : List[str] ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
def __UpperCamelCase ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Any=4 , __UpperCamelCase : Tuple=False ) -> str:
A = compute_bleu(
reference_corpus=__UpperCamelCase , translation_corpus=__UpperCamelCase , max_order=__UpperCamelCase , smooth=__UpperCamelCase )
((A) , (A) , (A) , (A) , (A) , (A)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
} | 106 |
import unittest
import numpy as np
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = None , ):
'''simple docstring'''
UpperCAmelCase_ : Dict = np.shape(_lowercase )
UpperCAmelCase_ : Optional[Any] = np.shape(_lowercase )
UpperCAmelCase_ : Tuple = np.shape(_lowercase )
if shape_a[0] != shape_b[0]:
UpperCAmelCase_ : Tuple = (
'''Expected the same number of rows for A and B. '''
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(_lowercase )
if shape_b[1] != shape_c[1]:
UpperCAmelCase_ : List[Any] = (
'''Expected the same number of columns for B and C. '''
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(_lowercase )
UpperCAmelCase_ : Dict = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase_ : Any = np.linalg.inv(_lowercase )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
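# Background for the tests below: for the block matrix M = [[A, B], [B.T, C]],
# the Schur complement of A is S = C - B.T @ A^-1 @ B, and when A is
# invertible det(M) = det(A) * det(S); this is the identity the first test
# checks with assertAlmostEqual.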
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] )
UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s )
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> None:
UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 30 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_UpperCAmelCase : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
_UpperCAmelCase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
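    # Assumed behaviour of the lazy-import pattern above: `_LazyModule`
    # stands in for this module so that attribute access imports
    # `tokenization_tapex` only when `TapexTokenizer` is first touched,
    # keeping a bare `import transformers` cheap.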
| 107 |
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
UpperCAmelCase_ : Any = ''''''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
UpperCAmelCase_ : Any = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
UpperCAmelCase_ : Union[str, Any] = B'''=''' * ((6 - len(_lowercase ) % 6) // 2)
        # Pad binary_stream with arbitrary binary digits (0s by default) to make its
        # length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
UpperCAmelCase_ : int = B''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowercase ) , 6 ) ).encode()
+ padding
)
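# Worked example of the encoder above (illustrative values, matching RFC 4648):
# b"a" becomes the bit string "01100001" (8 bits); since 8 % 6 == 2, two
# padding characters are prepared ((6 - 2) // 2 == 2) and the bits are
# zero-extended to "011000" + "010000"; those 6-bit groups index B64_CHARSET
# at 24 and 16 ("Y" and "Q"), so the result is b"YQ==".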
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ) and not isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Tuple = (
'''argument should be a bytes-like object or ASCII string, '''
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase , _lowercase ):
try:
UpperCAmelCase_ : Any = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
UpperCAmelCase_ : str = encoded_data.count('''=''' )
    # Check if the encoded string contains non-base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCAmelCase_ : List[Any] = encoded_data[:-padding]
UpperCAmelCase_ : List[Any] = ''''''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCAmelCase_ : Tuple = ''''''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
UpperCAmelCase_ : str = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowercase ) , 8 )
]
return bytes(_lowercase )
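# Doctest-style round trip using the standard library for comparison (the
# stdlib implements the same RFC 4648 scheme as the helpers above):
# >>> import base64
# >>> base64.b64encode(b"a")
# b'YQ=='
# >>> base64.b64decode(b"YQ==")
# b'a'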
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__a: Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = ['''pixel_values''']
def __init__( self : Any , lowerCamelCase : bool = True , lowerCamelCase : Dict[str, int] = None , lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase : bool = True , lowerCamelCase : Union[int, float] = 1 / 255 , lowerCamelCase : bool = True , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : bool = True , **lowerCamelCase : List[Any] , ) -> None:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = size if size is not None else {"""height""": 384, """width""": 384}
_UpperCAmelCase = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCAmelCase = do_convert_rgb
def lowerCamelCase ( self : Any , lowerCamelCase : np.ndarray , lowerCamelCase : Dict[str, int] , lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : str , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
_UpperCAmelCase = (size["""height"""], size["""width"""])
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : np.ndarray , lowerCamelCase : Union[int, float] , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : Dict , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : Tuple , lowerCamelCase : np.ndarray , lowerCamelCase : Union[float, List[float]] , lowerCamelCase : Union[float, List[float]] , lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase : List[Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : Dict , lowerCamelCase : ImageInput , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Dict[str, int]] = None , lowerCamelCase : PILImageResampling = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[float] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[float, List[float]]] = None , lowerCamelCase : Optional[Union[str, TensorType]] = None , lowerCamelCase : bool = None , lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase : List[str] , ) -> PIL.Image.Image:
"""simple docstring"""
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_UpperCAmelCase = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCAmelCase = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
_UpperCAmelCase = BatchFeature(data={"""pixel_values""": images} , tensor_type=lowerCamelCase )
return encoded_outputs | 108 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase_ : int = TOKENIZER_MAPPING.values()
UpperCAmelCase_ : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
        # If we ever update the bert-base-cased tokenizer config, this dict will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # Trying to register something that already exists in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something that already exists in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new tokenizer
            # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = False
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # If remote code is not set, the default is to use the local one
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 30 | 0 |
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = {
"b0": efficientnet.EfficientNetBa,
"b1": efficientnet.EfficientNetBa,
"b2": efficientnet.EfficientNetBa,
"b3": efficientnet.EfficientNetBa,
"b4": efficientnet.EfficientNetBa,
"b5": efficientnet.EfficientNetBa,
"b6": efficientnet.EfficientNetBa,
"b7": efficientnet.EfficientNetBa,
}
a = {
"b0": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 224,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1280,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 240,
"dropout_rate": 0.2,
"dw_padding": [16],
},
"b2": {
"hidden_dim": 1408,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 260,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 16],
},
"b3": {
"hidden_dim": 1536,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 300,
"dropout_rate": 0.3,
"dw_padding": [5, 18],
},
"b4": {
"hidden_dim": 1792,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 380,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2048,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 456,
"dropout_rate": 0.4,
"dw_padding": [13, 27],
},
"b6": {
"hidden_dim": 2304,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 528,
"dropout_rate": 0.5,
"dw_padding": [31],
},
"b7": {
"hidden_dim": 2560,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 600,
"dropout_rate": 0.5,
"dw_padding": [18],
},
}
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = EfficientNetConfig()
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""hidden_dim"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""width_coef"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""depth_coef"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""image_size"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""dropout_rate"""]
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""dw_padding"""]
__SCREAMING_SNAKE_CASE = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE = 1000
__SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()}
return config
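# For orientation, the coefficients above implement EfficientNet's compound
# scaling: relative to "b0", for example, "b3" widens channels by 1.2x,
# deepens the network by 1.4x, and raises the input resolution from 224 to
# 300 (values repeated from CONFIG_MAP, not new data).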
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw )
return im
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = CONFIG_MAP[model_name]["""image_size"""]
__SCREAMING_SNAKE_CASE = EfficientNetImageProcessor(
size={"""height""": size, """width""": size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=__UpperCAmelCase , )
return preprocessor
def __magic_name__ ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )]
__SCREAMING_SNAKE_CASE = sorted(set(__UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = {b: str(__UpperCAmelCase ) for b, i in zip(__UpperCAmelCase , range(__UpperCAmelCase ) )}
__SCREAMING_SNAKE_CASE = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
    for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f"""block{b}_expand_conv/kernel:0""", f"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((f"""block{b}_expand_bn/gamma:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((f"""block{b}_expand_bn/beta:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_expand_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(f"""block{b}_dwconv/depthwise_kernel:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((f"""block{b}_bn/gamma:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((f"""block{b}_bn/beta:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(f"""block{b}_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(f"""block{b}_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((f"""block{b}_se_reduce/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((f"""block{b}_se_reduce/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((f"""block{b}_se_expand/kernel:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((f"""block{b}_se_expand/bias:0""", f"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(f"""block{b}_project_conv/kernel:0""", f"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((f"""block{b}_project_bn/gamma:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((f"""block{b}_project_bn/beta:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_mean:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(f"""block{b}_project_bn/moving_variance:0""", f"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
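
# For example, if the TF model exposes "stem_conv/kernel:0", the loop above yields
#     key_mapping["stem_conv/kernel:0"] == "efficientnet.embeddings.convolution.weight"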
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
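
# TF stores conv kernels as (height, width, in_channels, out_channels) while
# PyTorch expects (out, in, height, width), hence permute(3, 2, 0, 1). Depthwise
# kernels are (height, width, in_channels, depth_multiplier) in TF, hence
# permute(2, 3, 0, 1). Dense kernels only need a transpose.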
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
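
# Example invocation (paths and script name are illustrative):
#     python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model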
| 109 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the digit string n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
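
# reduce folds one window of digits into its running product, e.g.
#     reduce(lambda x, y: str(int(x) * int(y)), "234") == "24"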
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
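
# Weight tying: the decoder embedding matrix (vocab_size x emb_size) is reused as
# the lm_head weight, so logits are computed as hidden_states @ embed_tokens.weight.T.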
def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location='cpu')
    args = m2m_100['args']
    state_dict = m2m_100['model']
    lm_head_weights = state_dict['decoder.output_projection.weight']

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict['decoder.embed_tokens.weight'].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(',')]
    config = Speech2TextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True)

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'
            f''' but all the following weights are missing {missing}''' )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 110 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to `precision` digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError('Undefined for non-integers')
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers')

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
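
# Each Chudnovsky series term contributes roughly 14 additional correct digits,
# which is why the loop runs ceil(precision / 14) times.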
if __name__ == "__main__":
    n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""") | 30 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a DistilBERT model."""

    model_type = 'distilbert'
    attribute_map = {
        'hidden_size': 'dim',
        'num_attention_heads': 'n_heads',
        'num_hidden_layers': 'n_layers',
    }
    def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation='gelu', initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
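
# Illustrative usage: the ONNX exporter queries `inputs` to mark batch and
# sequence dimensions as dynamic in the exported graph.
#     onnx_config = DistilBertOnnxConfig(DistilBertConfig())
#     assert "input_ids" in onnx_config.inputs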
| 173 |
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d(x: float, y: float, z: float, scale: float, distance: float) -> tuple[float, float]:
    """Project a 3D point onto a 2D plane using one-point perspective."""
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f'Input values must either be float or int: {list(locals().values())}'
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
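
# One-point perspective: coordinates are divided by (z + distance), so points
# farther from the viewer shrink toward the origin; `scale` is a uniform zoom.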
def rotate(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate a 3D point around the given axis ('x', 'y' or 'z') by `angle`."""
    if not isinstance(axis, str):
        raise TypeError('Axis must be a str')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            'Input values except axis must either be float or int: '
            f'{list(input_variables.values())}'
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
UpperCAmelCase_ : Optional[int] = x * math.cos(_lowercase ) - y * math.sin(_lowercase )
UpperCAmelCase_ : List[Any] = y * math.cos(_lowercase ) + x * math.sin(_lowercase )
UpperCAmelCase_ : Optional[int] = z
elif axis == "x":
UpperCAmelCase_ : Any = y * math.cos(_lowercase ) - z * math.sin(_lowercase )
UpperCAmelCase_ : int = z * math.cos(_lowercase ) + y * math.sin(_lowercase )
UpperCAmelCase_ : Dict = x
elif axis == "y":
UpperCAmelCase_ : Union[str, Any] = x * math.cos(_lowercase ) - z * math.sin(_lowercase )
UpperCAmelCase_ : Optional[int] = z * math.cos(_lowercase ) + x * math.sin(_lowercase )
UpperCAmelCase_ : Optional[int] = y
else:
raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
return new_x, new_y, new_z
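
# Each branch above applies the planar rotation matrix [[cos, -sin], [sin, cos]]
# to the pair of coordinates orthogonal to the chosen axis.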
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 0 |
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
# fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
# fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths) | 360 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager

del _arrow_dataset, _utils, _deprecated_download_manager | 30 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """Build train and eval dataloaders for GLUE MRPC."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions, references=references, )
    eval_metric = metric.compute()
    return eval_metric["accuracy"]
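
# When the eval split does not divide evenly across processes, Accelerate pads
# the final batch with duplicated samples; the slicing in the last step of
# evaluation_loop drops those duplicates so each example is scored exactly once.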
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
# Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch - 1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return
    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)
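
# Each epoch produces two artifacts: an `epoch_{n}` directory written by
# accelerator.save_state() and a `state_{n}.json` snapshot that the resume
# path in training_function() validates against.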
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--partial_train_epoch", type=int, default=None, help="If passed, the training will stop after this number of epochs.", )
    parser.add_argument(
        "--num_epochs", type=int, default=2, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 426 |
def greatest_common_divisor(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    if greatest_common_divisor(a, m) != 1:
        msg = f'mod inverse of {a!r} and {m!r} does not exist'
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 30 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5), conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs, ):
'''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
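
# With the defaults above, Speech2TextConfig() yields an architecture similar to
# facebook/s2t-small-librispeech-asr (see the archive map at the top of the file).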
| 476 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]
    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8)) | 30 | 0 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
@require_tokenizers
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
class lowerCamelCase__ ( _a ):
__lowerCamelCase = False
class lowerCamelCase__ ( _a ):
__lowerCamelCase = NewTokenizer
__lowerCamelCase = False
try:
AutoConfig.register("""custom""" , _SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE , slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE , fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
lowerCamelCase__: int = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
lowerCamelCase__: Any = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
lowerCamelCase__: Tuple = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
lowerCamelCase__: List[str] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
lowerCamelCase__: Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
lowerCamelCase__: Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=_SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowerCamelCase__: Optional[int] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
lowerCamelCase__: Tuple = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=_SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase__: Tuple = AutoTokenizer.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase__: str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
lowerCamelCase__: Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
lowerCamelCase__: List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
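# A minimal sketch of the registration API this test exercises: register a
# custom config type with AutoConfig, then attach tokenizer classes to it via
# AutoTokenizer.register. `ToyConfig`/`ToyTokenizer` are placeholder names for
# illustration, not fixtures from this file; a real test would also un-register
# them in a `finally` block, as done above.
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class ToyConfig(PretrainedConfig):
    model_type = "toy-demo"

class ToyTokenizer(PreTrainedTokenizer):
    pass

AutoConfig.register("toy-demo", ToyConfig)
AutoTokenizer.register(ToyConfig, slow_tokenizer_class=ToyTokenizer)
# from_pretrained on a repo whose config declares model_type "toy-demo" would
# now resolve to ToyTokenizer.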
| 306 |
import numpy as np
import datasets
__a = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ),
} ) ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Any:
# convert to numpy arrays
UpperCAmelCase_ : str = np.array(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.array(_SCREAMING_SNAKE_CASE )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
# Get mahalanobis distance for each prediction
UpperCAmelCase_ : List[str] = X - np.mean(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = np.cov(reference_distribution.T )
try:
UpperCAmelCase_ : Any = np.linalg.inv(_SCREAMING_SNAKE_CASE )
except np.linalg.LinAlgError:
UpperCAmelCase_ : List[str] = np.linalg.pinv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = np.dot(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.dot(_SCREAMING_SNAKE_CASE ,X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist} | 30 | 0 |
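# A standalone sketch of the same computation as the metric above (names `X`
# and `ref` are illustrative). The deviation is taken from the mean of the
# reference distribution, and a pseudo-inverse guards against the singular
# covariance matrices that `np.linalg.inv` rejects.
import numpy as np

def mahalanobis_sketch(X, ref):
    X = np.asarray(X, dtype=float)
    ref = np.asarray(ref, dtype=float)
    delta = X - ref.mean(axis=0)       # deviation from the reference mean
    cov = np.cov(ref.T)                # covariance of the reference set
    try:
        inv_cov = np.linalg.inv(cov)
    except np.linalg.LinAlgError:
        inv_cov = np.linalg.pinv(cov)  # singular covariance: fall back to pinv
    return np.einsum("ij,jk,ik->i", delta, inv_cov, delta)

# Matches the docstring example above:
# mahalanobis_sketch([[0, 1]], [[0, 1], [1, 0]]) returns array([0.5]).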
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __lowerCAmelCase ( __magic_name__ ):
for param in module.parameters():
_lowercase: str = False
def __lowerCAmelCase ( ):
_lowercase: List[str] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_lowercase: Dict = '''mps'''
if device == "mps":
print(
"WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues"
" with generations." )
return device
def __lowerCAmelCase ( __magic_name__ ):
_lowercase: Tuple = plt.imshow(_lowercase )
fig.axes.get_xaxis().set_visible(_lowercase )
fig.axes.get_yaxis().set_visible(_lowercase )
plt.show()
def __lowerCAmelCase ( ):
_lowercase: Optional[Any] = datetime.now()
_lowercase: Optional[Any] = current_time.strftime("%H:%M:%S" )
return timestamp
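# Usage sketch for the helpers above: freezing a module just disables gradients
# on its parameters, so the optimizer skips them. The tiny nn.Linear below is
# only an illustration.
def _freeze_demo():
    layer = torch.nn.Linear(4, 2)
    for param in layer.parameters():
        param.requires_grad = False    # same effect as the freeze helper above
    assert not any(p.requires_grad for p in layer.parameters())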
| 226 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__a = R'\n    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n    with the format:\n\n    ```\n    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n    ```\n\n    Args:\n        questions (`str` or `List[str]`):\n            The questions to be encoded. You can specify one question for many passages. In this case, the question\n            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n            `titles` or `texts`.\n        titles (`str` or `List[str]`):\n            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n        texts (`str` or `List[str]`):\n            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n            Activates and controls padding. Accepts the following values:\n\n            - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n              if provided).\n            - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided.\n            - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n              lengths).\n        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n            Activates and controls truncation. Accepts the following values:\n\n            - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n              the maximum acceptable input length for the model if that argument is not provided. This will truncate\n              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n              of pairs) is provided.\n            - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the first\n              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n              acceptable input length for the model if that argument is not provided. This will only truncate the\n              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n            - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n              greater than the model maximum admissible input size).\n        max_length (`int`, *optional*):\n            Controls the maximum length to use by one of the truncation/padding parameters.\n\n            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n            is required by one of the truncation/padding parameters. If the model has no specific maximum input\n            length (like XLNet) truncation/padding to a maximum length will be deactivated.\n        return_tensors (`str` or [`~utils.TensorType`], *optional*):\n            If set, will return tensors instead of list of python integers. Acceptable values are:\n\n            - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n            - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n            - `\'np\'`: Return Numpy `np.ndarray` objects.\n        return_attention_mask (`bool`, *optional*):\n            Whether or not to return the attention mask. If not set, will return the attention mask according to the\n            specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n            [What are attention masks?](../glossary#attention-mask)\n\n    Returns:\n        `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n        - `input_ids`: List of token ids to be fed to a model.\n        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n    '
@add_start_docstrings(_a )
class __a:
"""simple docstring"""
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
elif titles is None or texts is None:
UpperCAmelCase_ : List[str] = titles if texts is None else texts
return super().__call__(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles]
UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts]
UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages
if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f'''There should be as many titles as texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' )
UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids''']
UpperCAmelCase_ : Optional[int] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
]
}
if return_attention_mask is not False:
UpperCAmelCase_ : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
UpperCAmelCase_ : Dict = attention_mask
return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = reader_input['''input_ids''']
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3]
UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ )
UpperCAmelCase_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id )
else:
UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_SCREAMING_SNAKE_CASE ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]:
UpperCAmelCase_ : Tuple = []
for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
UpperCAmelCase_ : str = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_SCREAMING_SNAKE_CASE ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_a )
class __a( _a , _a ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase = ['''input_ids''', '''attention_mask'''] | 30 | 0 |
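# A minimal sketch of the span selection performed by `_get_best_spans` above:
# each candidate answer span is scored as start_logit + end_logit, candidates
# are sorted by score, and spans nested in (or containing) an already chosen
# span are dropped. Inputs are plain lists of floats for illustration.
def best_spans_sketch(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue  # overlaps an already selected span
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

# best_spans_sketch([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]) -> [(1, 2), (0, 0)]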
def a ( snake_case__: int , snake_case__: Optional[Any] = False ):
'''simple docstring'''
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
raise ValueError(
'''Warning: upper bound of deterministic test is exceeded. '''
'''Pass allow_probable=True to allow probabilistic test. '''
'''A return value of True indicates a probable prime.''' )
# array bounds provided by analysis
lowercase_ = [
2_047,
1_373_653,
25_326_001,
3_215_031_751,
2_152_302_898_747,
3_474_749_660_383,
341_550_071_728_321,
1,
3_825_123_056_546_413_051,
1,
1,
318_665_857_834_031_151_167_461,
3_317_044_064_679_887_385_961_981,
]
lowercase_ = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
for idx, _p in enumerate(_lowercase , 1 ):
if n < _p:
# then we have our last prime to check
lowercase_ = primes[:idx]
break
lowercase_ = n - 1, 0
# break up n - 1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
lowercase_ = False
for r in range(_lowercase ):
lowercase_ = pow(_lowercase , d * 2**r , _lowercase )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
lowercase_ = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and n MUST be composite
return False
return True
def a ( ):
'''simple docstring'''
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838_201 )
assert miller_rabin(838_207 )
# 1_373_653
assert not miller_rabin(17_316_001 )
assert miller_rabin(17_316_017 )
# 25_326_001
assert not miller_rabin(3_078_386_641 )
assert miller_rabin(3_078_386_653 )
# 3_215_031_751
assert not miller_rabin(1_713_045_574_801 )
assert miller_rabin(1_713_045_574_819 )
# 2_152_302_898_747
assert not miller_rabin(2_779_799_728_307 )
assert miller_rabin(2_779_799_728_327 )
# 3_474_749_660_383
assert not miller_rabin(113_850_023_909_441 )
assert miller_rabin(113_850_023_909_527 )
# 341_550_071_728_321
assert not miller_rabin(1_275_041_018_848_804_351 )
assert miller_rabin(1_275_041_018_848_804_391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79_666_464_458_507_787_791_867 )
assert miller_rabin(79_666_464_458_507_787_791_951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552_840_677_446_647_897_660_333 )
assert miller_rabin(552_840_677_446_647_897_660_359 )
# 3_317_044_064_679_887_385_961_981
# upper bound of the deterministic test; above it the test is probabilistic
if __name__ == "__main__":
test_miller_rabin()
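# Sketch of the core decomposition the test above relies on: write n - 1 as
# d * 2**s with d odd, then probe a**d, a**(2*d), ... mod n for each witness a.
# Names below are illustrative.
def decompose(n):
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    return d, s

assert decompose(561) == (35, 4)   # 560 = 35 * 2**4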
| 97 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__a = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 0 |
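# A toy sketch of the lazy-import pattern the init above relies on: attributes
# are resolved on first access instead of at import time. This only mimics the
# idea; the real transformers._LazyModule also handles submodules, __dir__, and
# pickling.
import importlib
import types

class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_module.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(submodule), attr)
        setattr(self, attr, value)  # cache so later lookups are plain attribute hits
        return value

# TinyLazyModule("demo", {"json": ["dumps"]}).dumps({"a": 1}) -> '{"a": 1}'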
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class UpperCAmelCase_ ( _a ):
'''simple docstring'''
lowercase_ : List[Any] = "megatron-bert"
def __init__( self : int , snake_case__ : Optional[int]=2_90_56 , snake_case__ : Any=10_24 , snake_case__ : Any=24 , snake_case__ : str=16 , snake_case__ : List[Any]=40_96 , snake_case__ : Optional[int]="gelu" , snake_case__ : int=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : List[str]=5_12 , snake_case__ : Optional[int]=2 , snake_case__ : Dict=0.02 , snake_case__ : List[str]=1e-12 , snake_case__ : List[Any]=0 , snake_case__ : int="absolute" , snake_case__ : Any=True , **snake_case__ : Tuple , ):
'''simple docstring'''
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : List[str] = num_hidden_layers
UpperCAmelCase__ : Dict = num_attention_heads
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : Optional[int] = intermediate_size
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Optional[Any] = initializer_range
UpperCAmelCase__ : Dict = layer_norm_eps
UpperCAmelCase__ : Dict = position_embedding_type
UpperCAmelCase__ : int = use_cache
| 199 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''wav2vec2'''
def __init__( self ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=768 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=12 ,_SCREAMING_SNAKE_CASE=3_072 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE="group" ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 512, 512, 512) ,_SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) ,_SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=128 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=0.05 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.0 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=320 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE="sum" ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=256 ,_SCREAMING_SNAKE_CASE=(512, 512, 512, 512, 1_500) ,_SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=0 ,_SCREAMING_SNAKE_CASE=1 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
super().__init__(**_SCREAMING_SNAKE_CASE ,pad_token_id=_SCREAMING_SNAKE_CASE ,bos_token_id=_SCREAMING_SNAKE_CASE ,eos_token_id=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = hidden_size
UpperCAmelCase_ : Tuple = feat_extract_norm
UpperCAmelCase_ : List[Any] = feat_extract_activation
UpperCAmelCase_ : str = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = conv_bias
UpperCAmelCase_ : str = num_conv_pos_embeddings
UpperCAmelCase_ : Any = num_conv_pos_embedding_groups
UpperCAmelCase_ : Tuple = len(self.conv_dim )
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Dict = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : str = hidden_dropout
UpperCAmelCase_ : int = attention_dropout
UpperCAmelCase_ : Tuple = activation_dropout
UpperCAmelCase_ : List[str] = feat_proj_dropout
UpperCAmelCase_ : int = final_dropout
UpperCAmelCase_ : Union[str, Any] = layerdrop
UpperCAmelCase_ : Optional[Any] = layer_norm_eps
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : List[str] = vocab_size
UpperCAmelCase_ : Optional[int] = do_stable_layer_norm
UpperCAmelCase_ : Optional[int] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase_ : Optional[int] = apply_spec_augment
UpperCAmelCase_ : Tuple = mask_time_prob
UpperCAmelCase_ : Optional[Any] = mask_time_length
UpperCAmelCase_ : Union[str, Any] = mask_time_min_masks
UpperCAmelCase_ : Optional[Any] = mask_feature_prob
UpperCAmelCase_ : str = mask_feature_length
UpperCAmelCase_ : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
UpperCAmelCase_ : Union[str, Any] = num_codevectors_per_group
UpperCAmelCase_ : Any = num_codevector_groups
UpperCAmelCase_ : Union[str, Any] = contrastive_logits_temperature
UpperCAmelCase_ : List[str] = feat_quantizer_dropout
UpperCAmelCase_ : Dict = num_negatives
UpperCAmelCase_ : List[str] = codevector_dim
UpperCAmelCase_ : List[str] = proj_codevector_dim
UpperCAmelCase_ : str = diversity_loss_weight
# ctc loss
UpperCAmelCase_ : List[Any] = ctc_loss_reduction
UpperCAmelCase_ : List[str] = ctc_zero_infinity
# adapter
UpperCAmelCase_ : Optional[Any] = add_adapter
UpperCAmelCase_ : Any = adapter_kernel_size
UpperCAmelCase_ : Optional[int] = adapter_stride
UpperCAmelCase_ : List[Any] = num_adapter_layers
UpperCAmelCase_ : Optional[Any] = output_hidden_size or hidden_size
UpperCAmelCase_ : Optional[int] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase_ : List[str] = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = xvector_output_dim
@property
def a__ ( self ) -> Any:
return functools.reduce(operator.mul ,self.conv_stride ,1 ) | 30 | 0 |
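# The property above multiplies the conv strides together, i.e. the overall
# downsampling factor of the feature extractor. For the default strides this is
# 320 input samples per output frame, so 16 kHz audio yields one frame per 20 ms.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)   # default from the config above
assert functools.reduce(operator.mul, conv_stride, 1) == 320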
def _UpperCamelCase ( lowercase__ = 10 , lowercase__ = 22 ):
__SCREAMING_SNAKE_CASE : Tuple = range(1 , _lowercase )
__SCREAMING_SNAKE_CASE : Optional[int] = range(1 , _lowercase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f"""{solution(1_0, 2_2) = }""")
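# Worked check for the brute force above (counting n-digit integers that are
# also nth powers): 16807 = 7**5 has exactly 5 digits, so it is counted.
assert len(str(7**5)) == 5
# Bases >= 10 can never qualify, since 10**n already has n + 1 digits, which is
# why a small base range suffices for an exact answer.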
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__a = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 0 |
from __future__ import annotations
from fractions import Fraction
def UpperCAmelCase__ (UpperCamelCase_ ,UpperCamelCase_ ):
"""simple docstring"""
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
snake_case = []
snake_case = 11
snake_case = int('''1''' + '''0''' * digit_len )
for num in range(_lowercase ,_lowercase ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(_lowercase ,_lowercase ):
solutions.append(F'''{num}/{den}''' )
den += 1
num += 1
snake_case = 10
return solutions
def UpperCAmelCase__ (UpperCamelCase_ = 2 ):
"""simple docstring"""
snake_case = 1.0
for fraction in fraction_list(_lowercase ):
snake_case = Fraction(_lowercase )
result *= frac.denominator / frac.numerator
return int(_lowercase )
if __name__ == "__main__":
print(solution())
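# Worked check for the digit-cancelling predicate above: 49/98 shares the digit
# 9 (units of the numerator, tens of the denominator), and "cancelling" it
# gives 4/8, which really does equal 49/98.
assert 49 % 10 == 98 // 10
assert (49 // 10) / (98 % 10) == 49 / 98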
| 550 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__a = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1 ) -> Dict:
UpperCAmelCase_ : List[Any] = tokenizer
UpperCAmelCase_ : int = dataset
UpperCAmelCase_ : Dict = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
UpperCAmelCase_ : Optional[int] = n_copies
def __iter__( self ) -> Any:
UpperCAmelCase_ : List[Any] = []
for task in range(self.n_tasks ):
# without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
UpperCAmelCase_ : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : str = start_length
UpperCAmelCase_ : Optional[int] = eof_strings
UpperCAmelCase_ : str = tokenizer
def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
UpperCAmelCase_ : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = defaultdict(_lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowercase ) ):
with torch.no_grad():
UpperCAmelCase_ : Dict = batch['''ids'''].shape[-1]
UpperCAmelCase_ : Optional[Any] = accelerator.unwrap_model(_lowercase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase )
# each task is generated batch_size times
UpperCAmelCase_ : Union[str, Any] = batch['''task_id'''].repeat(_lowercase )
UpperCAmelCase_ : Dict = accelerator.pad_across_processes(
_lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
UpperCAmelCase_, UpperCAmelCase_ : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
UpperCAmelCase_ : Union[str, Any] = generated_tokens.cpu().numpy()
UpperCAmelCase_ : Union[str, Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowercase , _lowercase ):
gen_token_dict[task].append(_lowercase )
UpperCAmelCase_ : Union[str, Any] = [[] for _ in range(_lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
UpperCAmelCase_ : int = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
code_gens[task].append(remove_last_block(_lowercase ) )
return code_gens
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = HfArgumentParser(_lowercase )
UpperCAmelCase_ : int = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCAmelCase_ : Optional[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
UpperCAmelCase_ : List[Any] = '''false'''
if args.num_workers is None:
UpperCAmelCase_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
UpperCAmelCase_ : int = Accelerator()
set_seed(args.seed , device_specific=_lowercase )
# Load model and tokenizer
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase_ : Any = tokenizer.eos_token
UpperCAmelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCAmelCase_ : str = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ),
}
# Load evaluation dataset and metric
UpperCAmelCase_ : Tuple = load_dataset('''openai_humaneval''' )
UpperCAmelCase_ : Dict = load_metric('''code_eval''' )
UpperCAmelCase_ : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
UpperCAmelCase_ : str = args.n_samples // args.batch_size
UpperCAmelCase_ : str = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase )
# note: args.batch_size is actually num_return_sequences per prompt, not the dataloader batch size
UpperCAmelCase_ : Optional[Any] = DataLoader(_lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCAmelCase_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
UpperCAmelCase_, UpperCAmelCase_ : int = accelerator.prepare(_lowercase , _lowercase )
UpperCAmelCase_ : int = complete_code(
_lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , )
if accelerator.is_main_process:
UpperCAmelCase_ : Any = []
for task in tqdm(range(_lowercase ) ):
UpperCAmelCase_ : int = human_eval['''test'''][task]['''test''']
UpperCAmelCase_ : str = f'''check({human_eval["test"][task]["entry_point"]})'''
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
UpperCAmelCase_, UpperCAmelCase_ : Any = code_eval_metric.compute(
references=_lowercase , predictions=_lowercase , num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(_lowercase , _lowercase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 30 | 0 |
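# Minimal sketch of the custom stopping criterion used above: generation halts
# once every decoded continuation contains one of the EOF markers (e.g.
# "\nclass", "\ndef"). The helper below reproduces just that boolean logic on
# plain strings; the name is illustrative.
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def all_generations_done(decoded_generations):
    return all(
        any(stop in gen for stop in EOF_STRINGS) for gen in decoded_generations
    )

assert all_generations_done(["    return x\ndef next_fn():"])
assert not all_generations_done(["    return x  # still inside the function"])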
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="attention" ) -> Union[str, Any]:
snake_case__ : Dict = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
snake_case__ : Dict = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
snake_case__ : Tuple = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
snake_case__ : Union[str, Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
snake_case__ : Optional[Any] = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
snake_case__ : List[Any] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
snake_case__ : str = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
snake_case__ : Optional[Any] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> List[str]:
if split_mlp_wi:
snake_case__ : Any = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
snake_case__ : str = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
snake_case__ : int = (wi_a, wi_a)
else:
snake_case__ : str = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
snake_case__ : Optional[int] = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , *, __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Dict:
snake_case__ : Optional[Any] = traverse_util.flatten_dict(variables['target'] )
snake_case__ : Optional[Any] = {'''/'''.join(_lowercase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case__ : List[str] = '''encoder/encoder/mlp/wi_0/kernel''' in old
print('Split MLP:' , _lowercase )
snake_case__ : int = collections.OrderedDict()
# Shared embeddings.
snake_case__ : Optional[Any] = old['''token_embedder/embedding''']
# Encoder.
for i in range(_lowercase ):
# Block i, layer 0 (Self Attention).
snake_case__ : str = tax_layer_norm_lookup(_lowercase , _lowercase , 'encoder' , 'pre_attention_layer_norm' )
snake_case__ : List[str] = tax_attention_lookup(_lowercase , _lowercase , 'encoder' , 'attention' )
snake_case__ : Optional[Any] = layer_norm
snake_case__ : str = k.T
snake_case__ : Tuple = o.T
snake_case__ : Union[str, Any] = q.T
snake_case__ : Dict = v.T
# Block i, layer 1 (MLP).
snake_case__ : str = tax_layer_norm_lookup(_lowercase , _lowercase , 'encoder' , 'pre_mlp_layer_norm' )
snake_case__ : Dict = tax_mlp_lookup(_lowercase , _lowercase , 'encoder' , _lowercase )
snake_case__ : List[Any] = layer_norm
if split_mlp_wi:
snake_case__ : int = wi[0].T
snake_case__ : str = wi[1].T
else:
snake_case__ : Optional[int] = wi.T
snake_case__ : List[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case__ : List[str] = tax_relpos_bias_lookup(
_lowercase , _lowercase , 'encoder' ).T
snake_case__ : Optional[int] = old['''encoder/encoder_norm/scale''']
if not scalable_attention:
snake_case__ : str = tax_relpos_bias_lookup(
_lowercase , 0 , 'encoder' ).T
snake_case__ : Optional[int] = tax_relpos_bias_lookup(
_lowercase , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(_lowercase ):
# Block i, layer 0 (Self Attention).
snake_case__ : Union[str, Any] = tax_layer_norm_lookup(_lowercase , _lowercase , 'decoder' , 'pre_self_attention_layer_norm' )
snake_case__ : List[str] = tax_attention_lookup(_lowercase , _lowercase , 'decoder' , 'self_attention' )
snake_case__ : Union[str, Any] = layer_norm
snake_case__ : str = k.T
snake_case__ : Dict = o.T
snake_case__ : Optional[int] = q.T
snake_case__ : Union[str, Any] = v.T
# Block i, layer 1 (Cross Attention).
snake_case__ : Union[str, Any] = tax_layer_norm_lookup(_lowercase , _lowercase , 'decoder' , 'pre_cross_attention_layer_norm' )
snake_case__ : Optional[Any] = tax_attention_lookup(_lowercase , _lowercase , 'decoder' , 'encoder_decoder_attention' )
snake_case__ : Tuple = layer_norm
snake_case__ : int = k.T
snake_case__ : Optional[Any] = o.T
snake_case__ : Tuple = q.T
snake_case__ : Union[str, Any] = v.T
# Block i, layer 2 (MLP).
snake_case__ : Optional[int] = tax_layer_norm_lookup(_lowercase , _lowercase , 'decoder' , 'pre_mlp_layer_norm' )
snake_case__ : Optional[int] = tax_mlp_lookup(_lowercase , _lowercase , 'decoder' , _lowercase )
snake_case__ : List[Any] = layer_norm
if split_mlp_wi:
snake_case__ : int = wi[0].T
snake_case__ : Any = wi[1].T
else:
snake_case__ : Any = wi.T
snake_case__ : Dict = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case__ : List[str] = tax_relpos_bias_lookup(_lowercase , _lowercase , 'decoder' ).T
snake_case__ : List[str] = old['''decoder/decoder_norm/scale''']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case__ : Optional[Any] = old['''decoder/logits_dense/kernel'''].T
return new
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
snake_case__ : List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case__ : Tuple = state_dict['''shared.weight''']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case__ : Any = state_dict['''shared.weight''']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
snake_case__ : Tuple = state_dict['''shared.weight''']
return state_dict
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[str]:
snake_case__ : int = checkpoints.load_tax_checkpoint(_lowercase )
snake_case__ : List[Any] = convert_tax_to_pytorch(
_lowercase , num_layers=config.num_layers , is_encoder_only=_lowercase , scalable_attention=_lowercase )
snake_case__ : Optional[Any] = make_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase , strict=_lowercase )
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , ) -> Optional[int]:
snake_case__ : Optional[Any] = MTaConfig.from_json_file(_lowercase )
print(f"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case__ : Any = UMTaEncoderModel(_lowercase )
else:
snake_case__ : List[Any] = UMTaForConditionalGeneration(_lowercase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_lowercase )
# Verify that we can load the checkpoint.
model.from_pretrained(_lowercase )
print('Done' )
if __name__ == "__main__":
A_ = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
A_ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
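    # Example invocation of the converter above (script name and paths are
    # placeholders; the flags are the ones defined by the argparse block):
    #
    #   python convert_t5x_checkpoint_to_pytorch.py \
    #       --t5x_checkpoint_path /path/to/t5x/checkpoint \
    #       --config_file /path/to/config.json \
    #       --pytorch_dump_path /path/to/output \
    #       --scalable_attention   # only for umt5-style (scaled attention) models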
| 270 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__a = logging.get_logger(__name__)
__a = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = '''imagegpt'''
lowerCAmelCase = ['''past_key_values''']
lowerCAmelCase = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self ,_SCREAMING_SNAKE_CASE=512 + 1 ,_SCREAMING_SNAKE_CASE=32 * 32 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=24 ,_SCREAMING_SNAKE_CASE=8 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE="quick_gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=1e-5 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,_SCREAMING_SNAKE_CASE=False ,**_SCREAMING_SNAKE_CASE ,) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = vocab_size
UpperCAmelCase_ : Union[str, Any] = n_positions
UpperCAmelCase_ : Union[str, Any] = n_embd
UpperCAmelCase_ : Any = n_layer
UpperCAmelCase_ : Optional[Any] = n_head
UpperCAmelCase_ : Union[str, Any] = n_inner
UpperCAmelCase_ : List[Any] = activation_function
UpperCAmelCase_ : List[str] = resid_pdrop
UpperCAmelCase_ : str = embd_pdrop
UpperCAmelCase_ : Optional[Any] = attn_pdrop
UpperCAmelCase_ : Dict = layer_norm_epsilon
UpperCAmelCase_ : Union[str, Any] = initializer_range
UpperCAmelCase_ : Dict = scale_attn_weights
UpperCAmelCase_ : Any = use_cache
UpperCAmelCase_ : List[str] = scale_attn_by_inverse_layer_idx
UpperCAmelCase_ : Tuple = reorder_and_upcast_attn
UpperCAmelCase_ : int = tie_word_embeddings
super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
class __a( _a ):
"""simple docstring"""
@property
def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) )
return inputs | 30 | 0 |
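# Sketch of what `_generate_dummy_images` feeds the preprocessor above: small
# random PIL images of the requested batch size and resolution. The helper name
# and defaults below are illustrative; the real method lives on OnnxConfig.
import numpy as np
from PIL import Image

def dummy_images(batch_size=1, num_channels=3, height=32, width=32):
    return [
        Image.fromarray(
            np.random.randint(0, 256, (height, width, num_channels), dtype=np.uint8)
        )
        for _ in range(batch_size)
    ]

assert len(dummy_images(batch_size=2)) == 2
assert dummy_images()[0].size == (32, 32)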
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''instructblip_vision_model'''
def __init__( self , lowerCamelCase=14_08 , lowerCamelCase=61_44 , lowerCamelCase=39 , lowerCamelCase=16 , lowerCamelCase=2_24 , lowerCamelCase=14 , lowerCamelCase="gelu" , lowerCamelCase=1e-6 , lowerCamelCase=0.0 , lowerCamelCase=1e-10 , lowerCamelCase=True , **lowerCamelCase , ) -> Any:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = hidden_size
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : Tuple = num_hidden_layers
UpperCamelCase : Dict = num_attention_heads
UpperCamelCase : Optional[Any] = patch_size
UpperCamelCase : str = image_size
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : Optional[int] = attention_dropout
UpperCamelCase : List[str] = layer_norm_eps
UpperCamelCase : List[Any] = hidden_act
UpperCamelCase : Any = qkv_bias
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCamelCase , **lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCamelCase : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''instructblip_qformer'''
def __init__( self , lowerCamelCase=3_05_22 , lowerCamelCase=7_68 , lowerCamelCase=12 , lowerCamelCase=12 , lowerCamelCase=30_72 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=5_12 , lowerCamelCase=0.02 , lowerCamelCase=1e-12 , lowerCamelCase=0 , lowerCamelCase="absolute" , lowerCamelCase=2 , lowerCamelCase=14_08 , **lowerCamelCase , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase : List[Any] = vocab_size
UpperCamelCase : Optional[int] = hidden_size
UpperCamelCase : int = num_hidden_layers
UpperCamelCase : Any = num_attention_heads
UpperCamelCase : Optional[int] = hidden_act
UpperCamelCase : List[Any] = intermediate_size
UpperCamelCase : Optional[int] = hidden_dropout_prob
UpperCamelCase : List[Any] = attention_probs_dropout_prob
UpperCamelCase : List[Any] = max_position_embeddings
UpperCamelCase : int = initializer_range
UpperCamelCase : List[str] = layer_norm_eps
UpperCamelCase : Tuple = position_embedding_type
UpperCamelCase : int = cross_attention_frequency
UpperCamelCase : Optional[int] = encoder_hidden_size
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCamelCase , **lowerCamelCase ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[int] = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
UpperCamelCase : str = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = '''instructblip'''
__SCREAMING_SNAKE_CASE = True
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=32 , **lowerCamelCase ) -> List[str]:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
if vision_config is None:
UpperCamelCase : List[Any] = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
UpperCamelCase : Tuple = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
UpperCamelCase : str = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
UpperCamelCase : List[str] = InstructBlipVisionConfig(**_SCREAMING_SNAKE_CASE )
UpperCamelCase : Optional[Any] = InstructBlipQFormerConfig(**_SCREAMING_SNAKE_CASE )
UpperCamelCase : Dict = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
UpperCamelCase : List[Any] = CONFIG_MAPPING[text_model_type](**_SCREAMING_SNAKE_CASE )
UpperCamelCase : Tuple = self.text_config.tie_word_embeddings
UpperCamelCase : List[Any] = self.text_config.is_encoder_decoder
UpperCamelCase : int = num_query_tokens
UpperCamelCase : Any = self.vision_config.hidden_size
UpperCamelCase : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCamelCase : Optional[Any] = 1.0
UpperCamelCase : Any = 0.02
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ) -> Optional[Any]:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **_SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase : Any = self.vision_config.to_dict()
UpperCamelCase : List[str] = self.qformer_config.to_dict()
UpperCamelCase : Dict = self.text_config.to_dict()
UpperCamelCase : Optional[Any] = self.__class__.model_type
return output
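# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Assumes the real `transformers` classes that the code above mirrors; the numbers
# are simply the documented defaults (cross_attention_frequency=2, encoder_hidden_size=1408).
from transformers import InstructBlipConfig, InstructBlipQFormerConfig

example_qformer_config = InstructBlipQFormerConfig(cross_attention_frequency=2, encoder_hidden_size=1408)
example_config = InstructBlipConfig(qformer_config=example_qformer_config.to_dict(), num_query_tokens=32)
assert example_config.qformer_config.encoder_hidden_size == 1408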
| 173 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__a = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__a = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
    layer_number = int(re.match(r'''.*layer_(\d*).*''' , _lowercase )[1] )
layer_number -= 3
return f'''h.{layer_number}.''' + key
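# Worked example (hypothetical shard name): with key "mlp.dense_4h_to_h.weight" and
# file "layer_04-model_00-model_states.pt", the regex extracts 4, and the hard-coded
# offset of 3 (the leading non-transformer shards) maps it to "h.1.mlp.dense_4h_to_h.weight".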
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
UpperCAmelCase_ : Any = re.search(r'''[^\d](\d+)$''' , str(_lowercase ) )
if bit_search is None:
raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
UpperCAmelCase_ : Optional[int] = int(bit_search.groups()[0] )
return bit_size // 8
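# Worked examples for the helper above: str(torch.float16) ends in "16", so it maps
# to 16 // 8 = 2 bytes; torch.float32 gives 4 bytes; torch.bool is special-cased to
# 1 / 8 byte per element.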
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if bloom_config_file == "":
UpperCAmelCase_ : Tuple = BloomConfig()
else:
UpperCAmelCase_ : Optional[int] = BloomConfig.from_json_file(_lowercase )
if shard_model:
UpperCAmelCase_ : Any = os.listdir(_lowercase )
        UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('''Processing file: {}'''.format(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
                # Rename keys to the Transformers naming scheme
UpperCAmelCase_ : Dict = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
            # Divide the weights we want to average by the number of TP ranks
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
UpperCAmelCase_ : List[Any] = BloomConfig()
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : List[str] = total_size
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
UpperCAmelCase_ : Any = BloomModel(_lowercase )
UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
        UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = None
for i, file in enumerate(_lowercase ):
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
                # Rename keys to the Transformers naming scheme
UpperCAmelCase_ : str = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Dict = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : int = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
            # Divide the weights we want to average by the number of TP ranks
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
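# Hedged CLI sketch (script name and paths are illustrative; the flags match the
# argparse definitions below):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_shards \
#       --pytorch_dump_folder_path ./bloom-converted \
#       --shard_model \
#       --pretraining_tp 4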
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__a = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class lowercase_ :
lowerCAmelCase__ =PegasusConfig
lowerCAmelCase__ ={}
lowerCAmelCase__ ="gelu"
def __init__( self : str , snake_case__ : Dict , snake_case__ : Dict=13 , snake_case__ : Optional[Any]=7 , snake_case__ : int=True , snake_case__ : Any=False , snake_case__ : str=99 , snake_case__ : Union[str, Any]=32 , snake_case__ : str=2 , snake_case__ : Union[str, Any]=4 , snake_case__ : Optional[Any]=37 , snake_case__ : List[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : Tuple=40 , snake_case__ : Optional[int]=2 , snake_case__ : Dict=1 , snake_case__ : Dict=0 , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = eos_token_id
SCREAMING_SNAKE_CASE_ = pad_token_id
SCREAMING_SNAKE_CASE_ = bos_token_id
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE_ = tf.concat([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE_ = prepare_pegasus_inputs_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return config, inputs_dict
def __a ( self : int , snake_case__ : Any , snake_case__ : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFPegasusModel(config=_SCREAMING_SNAKE_CASE ).get_decoder()
SCREAMING_SNAKE_CASE_ = inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE_ = input_ids[:1, :]
SCREAMING_SNAKE_CASE_ = inputs_dict['''attention_mask'''][:1, :]
SCREAMING_SNAKE_CASE_ = inputs_dict['''head_mask''']
SCREAMING_SNAKE_CASE_ = 1
# first forward pass
SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        SCREAMING_SNAKE_CASE_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        SCREAMING_SNAKE_CASE_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and attention_mask
SCREAMING_SNAKE_CASE_ = tf.concat([input_ids, next_tokens] , axis=-1 )
SCREAMING_SNAKE_CASE_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
SCREAMING_SNAKE_CASE_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
SCREAMING_SNAKE_CASE_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
SCREAMING_SNAKE_CASE_ = output_from_no_past[:, -3:, random_slice_idx]
SCREAMING_SNAKE_CASE_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rtol=1e-3 )
def _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , )-> str:
if attention_mask is None:
SCREAMING_SNAKE_CASE_ = tf.cast(tf.math.not_equal(_lowercase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE_ = tf.concat(
[
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
] , axis=-1 , )
if head_mask is None:
SCREAMING_SNAKE_CASE_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
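# Hedged usage note: the helper above fills in any mask it is not handed explicitly;
# with a config ``cfg`` and id tensors ``ids`` / ``dec_ids`` (hypothetical names):
#   inputs = prepare_pegasus_inputs_dict(cfg, ids, dec_ids)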
@require_tf
class lowercase_ (_a , _a , unittest.TestCase ):
lowerCAmelCase__ =(TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowerCAmelCase__ =(TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase__ =(
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ =True
lowerCAmelCase__ =False
lowerCAmelCase__ =False
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = TFPegasusModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE )
def __a ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_SCREAMING_SNAKE_CASE )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowercase_ (unittest.TestCase ):
lowerCAmelCase__ =[
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning \'Oh I think you\'re nominated\'\", said Dappy.\"And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around.\"At the end of the day we\'re grateful to be where we are in our careers.\"If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
lowerCAmelCase__ =[
"California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowerCAmelCase__ ="google/pegasus-xsum"
@cached_property
def __a ( self : List[Any] ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __a ( self : Optional[int] ):
"""simple docstring"""
        SCREAMING_SNAKE_CASE_ = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name )
return model
def __a ( self : Optional[int] , **snake_case__ : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.translate_src_text(**_SCREAMING_SNAKE_CASE )
assert self.expected_text == generated_words
def __a ( self : Optional[Any] , **snake_case__ : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer(self.src_text , **_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='tf' )
SCREAMING_SNAKE_CASE_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=_SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=_SCREAMING_SNAKE_CASE )
return generated_words
@slow
def __a ( self : int ):
"""simple docstring"""
self._assert_generated_batch_equal_expected() | 360 |
def solution( ):
'''simple docstring'''
    UpperCAmelCase_ : Dict = 0
    for i in range(1 , 1001 ):
        UpperCAmelCase_ += i**i
    return str(UpperCAmelCase_ )[-10:]
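# Alternative sketch (illustration, not in the original): since only the last ten
# digits matter, modular exponentiation keeps every intermediate value below 10**10;
# zfill restores leading zeros that str() would otherwise drop.
def solution_mod() -> str:
    modulus = 10**10
    return str(sum(pow(i , i , modulus ) for i in range(1 , 1001 )) % modulus ).zfill(10 )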
if __name__ == "__main__":
print(solution()) | 30 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''openai/imagegpt-small''': '''''',
'''openai/imagegpt-medium''': '''''',
'''openai/imagegpt-large''': '''''',
}
class a ( _a ):
"""simple docstring"""
A__ : List[Any] = "imagegpt"
A__ : Dict = ["past_key_values"]
A__ : Dict = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , snake_case_=512 + 1 , snake_case_=32 * 32 , snake_case_=512 , snake_case_=24 , snake_case_=8 , snake_case_=None , snake_case_="quick_gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1e-5 , snake_case_=0.02 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , **snake_case_ , ) -> Optional[int]:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_positions
_UpperCAmelCase = n_embd
_UpperCAmelCase = n_layer
_UpperCAmelCase = n_head
_UpperCAmelCase = n_inner
_UpperCAmelCase = activation_function
_UpperCAmelCase = resid_pdrop
_UpperCAmelCase = embd_pdrop
_UpperCAmelCase = attn_pdrop
_UpperCAmelCase = layer_norm_epsilon
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scale_attn_weights
_UpperCAmelCase = use_cache
_UpperCAmelCase = scale_attn_by_inverse_layer_idx
_UpperCAmelCase = reorder_and_upcast_attn
_UpperCAmelCase = tie_word_embeddings
super().__init__(tie_word_embeddings=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class a ( _a ):
"""simple docstring"""
@property
def __A ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def __A ( self , snake_case_ , snake_case_ = 1 , snake_case_ = -1 , snake_case_ = False , snake_case_ = None , snake_case_ = 3 , snake_case_ = 32 , snake_case_ = 32 , ) -> Mapping[str, Any]:
_UpperCAmelCase = self._generate_dummy_images(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = dict(preprocessor(images=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) )
return inputs
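# Hedged sketch (illustration only): the real ImageGPTConfig ships the same default
# vocabulary as the class above -- 512 color clusters plus one start-of-sentence token.
from transformers import ImageGPTConfig
assert ImageGPTConfig().vocab_size == 512 + 1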
| 426 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__a = None
__a = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
__a = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "PIL.Image.Image"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
def __call__( self ) -> Tuple:
return self.pa_type
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : Dict = {}
        path, bytes_ = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Optional[Any] = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase_ : Dict = storage.field('''bytes''' )
else:
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase_ : int = storage.field('''path''' )
else:
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : Any = f.read()
return bytes_
UpperCAmelCase_ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : List[str] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
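# Hedged round-trip sketch using the public `datasets` API that the feature above
# implements (kept as comments, since this module is itself part of `datasets`):
#   from datasets import Dataset, Image
#   ds = Dataset.from_dict({"image": [np.zeros((8, 8, 3), dtype=np.uint8)]})
#   ds = ds.cast_column("image", Image())
#   pil_image = ds[0]["image"]   # lazily decoded to a PIL.Image.Image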
def lowerCamelCase__ ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase_ : int = image.format
else:
UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(_lowercase , format=_lowercase )
return buffer.getvalue()
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if hasattr(_lowercase , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
UpperCAmelCase_ : Tuple = array.dtype
UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
UpperCAmelCase_ : Dict = dtype.kind
UpperCAmelCase_ : Union[str, Any] = dtype.itemsize
UpperCAmelCase_ : Optional[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase_ : Tuple = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase_ : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase )
UpperCAmelCase_ : str = np.dtype(_lowercase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) )
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase )
if isinstance(_lowercase , _lowercase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_lowercase , np.ndarray ):
UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
elif isinstance(_lowercase , PIL.Image.Image ):
UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
else:
return objs
else:
return objs | 30 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
__a : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
__a : Optional[int] = load_dataset('nielsr/rvlcdip-demo' )
__a : Optional[Any] = dataset['''train'''][0]['''image'''].convert('RGB' )
__a : str = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__a : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
__a : List[Any] = outputs.logits
__a : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape , _SCREAMING_SNAKE_CASE )
__a : int = torch.tensor(
[-0.4158, -0.4092, -0.4347] , device=_SCREAMING_SNAKE_CASE , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 476 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' )
UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.logits
UpperCAmelCase_ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,)
self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def __lowerCAmelCase ( _UpperCamelCase=None ) -> Any:
'''simple docstring'''
if subparsers is not None:
lowerCamelCase__: Optional[Any] = subparsers.add_parser("""env""" )
else:
lowerCamelCase__: List[str] = argparse.ArgumentParser("""Accelerate env command""" )
parser.add_argument(
"""--config_file""" , default=_lowercase , help="""The config file to use for the default values in the launching script.""" )
if subparsers is not None:
parser.set_defaults(func=_lowercase )
return parser
def __lowerCAmelCase ( _UpperCamelCase ) -> Any:
'''simple docstring'''
lowerCamelCase__: Dict = torch.__version__
lowerCamelCase__: Optional[Any] = torch.cuda.is_available()
lowerCamelCase__: int = is_xpu_available()
lowerCamelCase__: Union[str, Any] = is_npu_available()
lowerCamelCase__: Optional[Any] = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(_lowercase ):
lowerCamelCase__: List[Any] = load_config_from_file(args.config_file ).to_dict()
lowerCamelCase__: Tuple = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''PyTorch XPU available''': str(_lowercase ),
'''PyTorch NPU available''': str(_lowercase ),
'''System RAM''': f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
}
if pt_cuda_available:
lowerCamelCase__: int = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""" )
print("""\n""".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) )
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""" )
lowerCamelCase__: Optional[int] = (
'''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(_lowercase , _lowercase )
else f"""\t{accelerate_config}"""
)
print(_lowercase )
lowerCamelCase__: int = accelerate_config
return info
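# Hedged CLI sketch (the subcommand wired up above; the config path is illustrative):
#   accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml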
def __lowerCAmelCase ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] = env_command_parser()
lowerCamelCase__: Tuple = parser.parse_args()
env_command(_lowercase )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 306 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__a = logging.get_logger(__name__)
class __a( _a ):
"""simple docstring"""
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) | 30 | 0 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_SCREAMING_SNAKE_CASE : Tuple = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class A ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase : Tuple = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase : Union[str, Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase : Union[str, Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def UpperCAmelCase__ ( self : Optional[Any]):
_lowercase: Dict = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt")
_lowercase: Optional[int] = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.5_0_4}])
_lowercase: Tuple = text_classifier("This is great !" , top_k=2)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}])
_lowercase: Union[str, Any] = text_classifier(["This is great !", "This is bad"] , top_k=2)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [
[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
] , )
_lowercase: int = text_classifier("This is great !" , top_k=1)
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.5_0_4}])
# Legacy behavior
_lowercase: int = text_classifier("This is great !" , return_all_scores=_SCREAMING_SNAKE_CASE)
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.5_0_4}])
_lowercase: Optional[int] = text_classifier("This is great !" , return_all_scores=_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}]])
_lowercase: Any = text_classifier(["This is great !", "Something else"] , return_all_scores=_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [
[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
[{"label": "LABEL_0", "score": 0.5_0_4}, {"label": "LABEL_1", "score": 0.4_9_6}],
] , )
_lowercase: Tuple = text_classifier(["This is great !", "Something else"] , return_all_scores=_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [
{"label": "LABEL_0", "score": 0.5_0_4},
{"label": "LABEL_0", "score": 0.5_0_4},
] , )
@require_torch
def UpperCAmelCase__ ( self : Any):
import torch
_lowercase: int = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu") , )
_lowercase: Optional[int] = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.5_0_4}])
@require_tf
def UpperCAmelCase__ ( self : Tuple):
_lowercase: Tuple = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf")
_lowercase: Tuple = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "LABEL_0", "score": 0.5_0_4}])
@slow
@require_torch
def UpperCAmelCase__ ( self : int):
_lowercase: Union[str, Any] = pipeline("text-classification")
_lowercase: Optional[Any] = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "POSITIVE", "score": 1.0}])
_lowercase: Tuple = text_classifier("This is bad !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "NEGATIVE", "score": 1.0}])
_lowercase: Tuple = text_classifier("Birds are a type of animal")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "POSITIVE", "score": 0.9_8_8}])
@slow
@require_tf
def UpperCAmelCase__ ( self : Optional[Any]):
_lowercase: Union[str, Any] = pipeline("text-classification" , framework="tf")
_lowercase: List[Any] = text_classifier("This is great !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "POSITIVE", "score": 1.0}])
_lowercase: Any = text_classifier("This is bad !")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "NEGATIVE", "score": 1.0}])
_lowercase: Optional[Any] = text_classifier("Birds are a type of animal")
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": "POSITIVE", "score": 0.9_8_8}])
def UpperCAmelCase__ ( self : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any]):
_lowercase: Optional[int] = TextClassificationPipeline(model=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE)
return text_classifier, ["HuggingFace is in", "This is another test"]
def UpperCAmelCase__ ( self : int , _UpperCamelCase : Dict , _UpperCamelCase : List[Any]):
_lowercase: Tuple = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_lowercase: Tuple = '''HuggingFace is in'''
_lowercase: str = text_classifier(_SCREAMING_SNAKE_CASE)
self.assertEqual(nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
_lowercase: int = ['''HuggingFace is in ''', '''Paris is in France''']
_lowercase: Optional[Any] = text_classifier(_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}, {"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}] , )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_lowercase: Optional[Any] = text_classifier(_SCREAMING_SNAKE_CASE , top_k=_SCREAMING_SNAKE_CASE)
        _lowercase: Union[str, Any] = len(model.config.id2label.values())
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [[{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}] * N, [{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}] * N] , )
_lowercase: List[str] = {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''}
_lowercase: Any = text_classifier(_SCREAMING_SNAKE_CASE)
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , {"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)} , )
        self.assertTrue(outputs["label"] in model.config.id2label.values())
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_lowercase: Union[str, Any] = [['''HuggingFace is in ''', '''Paris is in France''']]
with self.assertRaises(_SCREAMING_SNAKE_CASE):
text_classifier(_SCREAMING_SNAKE_CASE)
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_lowercase: Optional[int] = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE) , [{"label": ANY(_SCREAMING_SNAKE_CASE), "score": ANY(_SCREAMING_SNAKE_CASE)}] , )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
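# Hedged usage sketch mirroring the calls exercised above (same tiny test checkpoint;
# requires `transformers` with a torch backend):
#   from transformers import pipeline
#   classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#   classifier("This is great !", top_k=2)   # -> [{"label": ..., "score": ...}, ...]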
| 226 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]:
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30}
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : List[str] = crop_pct
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Any = do_normalize
UpperCAmelCase_ : str = image_mean
UpperCAmelCase_ : List[Any] = image_std
def a__ ( self ) -> str:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
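# Hedged note: with size {"shortest_edge": 30} and crop_pct 0.9 the processor should
# first resize so the shortest edge is roughly 30 / 0.9 ≈ 33 px, then center-crop
# back to the 30 x 30 crop_size -- the usual "resize then center crop" recipe.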
@require_torch
@require_vision
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self )
@property
def a__ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} )
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,) | 30 | 0 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowercase__:
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_2 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : List[Any]=1_0 , SCREAMING_SNAKE_CASE_ : List[Any]=[8, 1_6, 3_2, 6_4] , SCREAMING_SNAKE_CASE_ : str=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : List[str]="relu" , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : Dict=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE_ : Optional[Any]=[2, 3, 4] , SCREAMING_SNAKE_CASE_ : str=1 , ) -> Union[str, Any]:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = num_channels
lowercase_ = embeddings_size
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = is_training
lowercase_ = use_labels
lowercase_ = hidden_act
lowercase_ = num_labels
lowercase_ = scope
lowercase_ = len(_SCREAMING_SNAKE_CASE )
lowercase_ = out_features
lowercase_ = out_indices
lowercase_ = num_groups
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Tuple ) -> List[Any]:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ) -> Dict:
lowercase_ = BitModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]:
lowercase_ = self.num_labels
lowercase_ = BitForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int ) -> Optional[Any]:
lowercase_ = BitBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = BitBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowercase_ = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _lowercase ( self : int ) -> Optional[Any]:
lowercase_ = self.prepare_config_and_inputs()
lowercase_ = config_and_inputs
lowercase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__( _a , _a , unittest.TestCase ):
"""simple docstring"""
a :int = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
a :List[str] = (
{'feature-extraction': BitModel, 'image-classification': BitForImageClassification}
if is_torch_available()
else {}
)
a :Optional[int] = False
a :Optional[Any] = False
a :str = False
a :Any = False
a :str = False
def _lowercase ( self : str ) -> int:
lowercase_ = BitModelTester(self )
lowercase_ = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : int ) -> Any:
return
@unittest.skip(reason='''Bit does not output attentions''' )
def _lowercase ( self : Dict ) -> Union[str, Any]:
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def _lowercase ( self : int ) -> List[str]:
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def _lowercase ( self : List[str] ) -> Optional[int]:
pass
def _lowercase ( self : List[str] ) -> int:
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(_SCREAMING_SNAKE_CASE )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def _lowercase ( self : int ) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def _lowercase ( self : Tuple ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
def _lowercase ( self : int ) -> int:
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ = model_class(config=_SCREAMING_SNAKE_CASE )
for name, module in model.named_modules():
if isinstance(_SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def _lowercase ( self : int ) -> Optional[int]:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
lowercase_ = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
lowercase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ = self.model_tester.num_stages
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ = layer_type
lowercase_ = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def _lowercase ( self : Dict ) -> List[str]:
pass
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def _lowercase ( self : List[str] ) -> Optional[int]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = BitModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def a ( ):
'''simple docstring'''
lowercase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : int ) -> Optional[Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def _lowercase ( self : str ) -> List[Any]:
lowercase_ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(_SCREAMING_SNAKE_CASE )
lowercase_ = self.default_image_processor
lowercase_ = prepare_img()
lowercase_ = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase_ = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
lowercase_ = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
lowercase_ = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) )
@require_torch
class lowercase__( _a , unittest.TestCase ):
"""simple docstring"""
a :List[Any] = (BitBackbone,) if is_torch_available() else ()
a :int = BitConfig
a :List[str] = False
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
lowercase_ = BitModelTester(self )
| 97 |
import unittest
import numpy as np
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = None , ):
'''simple docstring'''
UpperCAmelCase_ : Dict = np.shape(_lowercase )
UpperCAmelCase_ : Optional[Any] = np.shape(_lowercase )
UpperCAmelCase_ : Tuple = np.shape(_lowercase )
if shape_a[0] != shape_b[0]:
UpperCAmelCase_ : Tuple = (
'''Expected the same number of rows for A and B. '''
f'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(_lowercase )
if shape_b[1] != shape_c[1]:
UpperCAmelCase_ : List[Any] = (
'''Expected the same number of columns for B and C. '''
f'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(_lowercase )
UpperCAmelCase_ : Dict = pseudo_inv
if a_inv is None:
try:
UpperCAmelCase_ : Any = np.linalg.inv(_lowercase )
except np.linalg.LinAlgError:
raise ValueError(
'''Input matrix A is not invertible. Cannot compute Schur complement.''' )
return mat_c - mat_b.T @ a_inv @ mat_b
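# A quick doctest-style sketch of what the function computes (it is called as
# `schur_complement` in the tests below; `pseudo_inv` is assumed None, so the
# inverse of A is taken with np.linalg.inv):
# >>> a = np.array([[1.0, 0.0], [0.0, 2.0]])
# >>> b = np.array([[1.0], [0.0]])
# >>> c = np.array([[3.0]])
# >>> schur_complement(a, b, c) # C - B^T A^-1 B = 3 - 1
# array([[2.]])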
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Any = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : List[str] = np.array([[2, 1], [6, 3]] )
UpperCAmelCase_ : Tuple = schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.block([[a, b], [b.T, c]] )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = np.linalg.det(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = np.linalg.det(_SCREAMING_SNAKE_CASE )
self.assertAlmostEqual(_SCREAMING_SNAKE_CASE ,det_a * det_s )
def a__ ( self ) -> None:
UpperCAmelCase_ : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : Optional[int] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> None:
UpperCAmelCase_ : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
UpperCAmelCase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
UpperCAmelCase_ : int = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
schur_complement(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 30 | 0 |
'''simple docstring'''
def snake_case_ ( lowercase__ , lowercase__ ):
UpperCAmelCase__ : Any = len(_lowercase )
print("The following activities are selected:" )
# The first activity is always selected
UpperCAmelCase__ : int = 0
print(_lowercase , end="," )
# Consider rest of the activities
for j in range(_lowercase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(_lowercase , end="," )
UpperCAmelCase__ : Optional[Any] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE = [1, 3, 0, 5, 8, 5]
SCREAMING_SNAKE_CASE = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
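# Expected console output for the sample above (greedy earliest-finish-first):
# The following activities are selected:
# 0,1,3,4,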
| 199 |
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Union[str, Any] = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
    UpperCAmelCase_ : Any = ''''''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    UpperCAmelCase_ : Any = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        UpperCAmelCase_ : Union[str, Any] = B'''=''' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
else:
UpperCAmelCase_ : int = B''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ) and not isinstance(_lowercase , _lowercase ):
UpperCAmelCase_ : Tuple = (
'''argument should be a bytes-like object or ASCII string, '''
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase , _lowercase ):
try:
UpperCAmelCase_ : Any = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
UpperCAmelCase_ : str = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
UpperCAmelCase_ : List[Any] = encoded_data[:-padding]
UpperCAmelCase_ : List[Any] = ''''''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
UpperCAmelCase_ : Tuple = ''''''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    UpperCAmelCase_ : str = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(dec )
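# Round-trip sanity check, assuming the two functions above keep their original
# names (base64_encode / base64_decode; both defs are mangled to the same name
# in this dump, so the second would shadow the first as written):
# >>> base64_decode(base64_encode(b"Hello")) == b"Hello"
# True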
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 | 0 |
from __future__ import annotations
def _UpperCamelCase ( lowercase__ , lowercase__ = None , lowercase__ = None ):
if start is None:
__SCREAMING_SNAKE_CASE : List[str] = 0
if end is None:
__SCREAMING_SNAKE_CASE : Dict = len(_lowercase ) - 1
if start >= end:
return
__SCREAMING_SNAKE_CASE : Optional[Any] = (start + end) // 2
slowsort(_lowercase , _lowercase , _lowercase )
slowsort(_lowercase , mid + 1 , _lowercase )
if sequence[end] < sequence[mid]:
        # swap so the larger of the two values ends up at `end`
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
slowsort(_lowercase , _lowercase , end - 1 )
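# In-place usage sketch (the recursive calls above assume the public name
# `slowsort`; the routine sorts ascending):
# >>> seq = [5, 2, 9, 1]
# >>> slowsort(seq)
# >>> seq
# [1, 2, 5, 9]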
if __name__ == "__main__":
from doctest import testmod
testmod()
| 696 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase_ : int = TOKENIZER_MAPPING.values()
UpperCAmelCase_ : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
        # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = False
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 30 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class A__ ( nn.Module ):
"""simple docstring"""
def __init__( self ):
super().__init__()
snake_case = nn.Linear(3 , 4 )
snake_case = nn.BatchNormad(4 )
snake_case = nn.Linear(4 , 5 )
def a_ ( self , __snake_case ):
return self.lineara(self.batchnorm(self.lineara(_SCREAMING_SNAKE_CASE ) ) )
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
snake_case = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_SCREAMING_SNAKE_CASE , model.state_dict() )
snake_case = os.path.join(_SCREAMING_SNAKE_CASE , '''index.json''' )
self.assertTrue(os.path.isfile(_SCREAMING_SNAKE_CASE ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
snake_case = os.path.join(_SCREAMING_SNAKE_CASE , F'''{key}.dat''' )
self.assertTrue(os.path.isfile(_SCREAMING_SNAKE_CASE ) )
# TODO: add tests on the fact weights are properly loaded
def a_ ( self ):
snake_case = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
snake_case = torch.randn(2 , 3 , dtype=_SCREAMING_SNAKE_CASE )
with TemporaryDirectory() as tmp_dir:
snake_case = offload_weight(_SCREAMING_SNAKE_CASE , '''weight''' , _SCREAMING_SNAKE_CASE , {} )
snake_case = os.path.join(_SCREAMING_SNAKE_CASE , '''weight.dat''' )
self.assertTrue(os.path.isfile(_SCREAMING_SNAKE_CASE ) )
self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(_SCREAMING_SNAKE_CASE ).split('''.''' )[1]}} )
snake_case = load_offloaded_weight(_SCREAMING_SNAKE_CASE , index['''weight'''] )
self.assertTrue(torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def a_ ( self ):
snake_case = ModelForTest()
snake_case = model.state_dict()
snake_case = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
snake_case = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case = OffloadedWeightsLoader(state_dict=_SCREAMING_SNAKE_CASE , save_folder=_SCREAMING_SNAKE_CASE )
# Every key is there with the right value
self.assertEqual(sorted(_SCREAMING_SNAKE_CASE ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , weight_map[key] ) )
snake_case = {k: v for k, v in state_dict.items() if '''weight''' in k}
snake_case = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case = OffloadedWeightsLoader(state_dict=_SCREAMING_SNAKE_CASE , save_folder=_SCREAMING_SNAKE_CASE )
# Every key is there with the right value
self.assertEqual(sorted(_SCREAMING_SNAKE_CASE ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Duplicates are removed
snake_case = OffloadedWeightsLoader(state_dict=_SCREAMING_SNAKE_CASE , save_folder=_SCREAMING_SNAKE_CASE )
# Every key is there with the right value
self.assertEqual(sorted(_SCREAMING_SNAKE_CASE ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE , weight_map[key] ) )
def a_ ( self ):
snake_case = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
snake_case = extract_submodules_state_dict(_SCREAMING_SNAKE_CASE , ['''a.1''', '''a.2'''] )
self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'''a.1''': 0, '''a.2''': 2} )
snake_case = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
snake_case = extract_submodules_state_dict(_SCREAMING_SNAKE_CASE , ['''a.1''', '''a.2'''] )
self.assertDictEqual(_SCREAMING_SNAKE_CASE , {'''a.1.a''': 0, '''a.2.a''': 2} )
| 550 |
from functools import reduce
__a = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def lowerCamelCase__ ( _lowercase = N ):
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
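# For the 1000-digit constant above and a window of 13 adjacent digits, the
# maximum product is 23514624000 (Project Euler problem 8).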
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 0 |
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
A_ = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
A_ = parser.parse_args()
A_ = "cpu"
A_ = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
A_ = "path-to-your-trained-model"
A_ = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
A_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
A_ = pipe.to(device)
# to channels last
A_ = pipe.unet.to(memory_format=torch.channels_last)
A_ = pipe.vae.to(memory_format=torch.channels_last)
A_ = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
A_ = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
A_ = torch.randn(2, 4, 64, 64)
A_ = torch.rand(1) * 999
A_ = torch.randn(2, 77, 768)
A_ = (sample, timestep, encoder_hidden_status)
try:
A_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
A_ = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
A_ = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
A_ = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
A_ = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
A_ = 666
A_ = torch.Generator(device).manual_seed(seed)
A_ = {"generator": generator}
if args.steps is not None:
A_ = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
A_ = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 270 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
UpperCAmelCase_ : Optional[Any] = ceil(precision / 14 )
UpperCAmelCase_ : int = 426880 * Decimal(10005 ).sqrt()
UpperCAmelCase_ : Tuple = 1
UpperCAmelCase_ : List[Any] = 13591409
    UpperCAmelCase_ : Optional[Any] = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        UpperCAmelCase_ : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
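# Doctest-style sanity check (well-known leading digits of pi; the function is
# invoked as `pi` in the __main__ block below):
# >>> pi(10)
# '3.14159265'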
if __name__ == "__main__":
__a = 50
print(F"""The first {n} digits of pi is: {pi(n)}""") | 30 | 0 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase_ = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase_ = object()
def A__ ( A : str , A : Optional[int]):
'''simple docstring'''
UpperCamelCase : Optional[int] = tuple((re.compile(x + "$") for x in qs))
for i in range(len(_lowercase) - len(_lowercase) + 1):
UpperCamelCase : List[Any] = [x.match(_lowercase) for x, y in zip(_lowercase , ks[i:])]
if matches and all(_lowercase):
return True
return False
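# Illustrative match (this helper is called as `_match` below): the query
# ("mlp", "c_fc", "kernel") matches any key path whose tail equals those
# components, e.g. ("transformer", "h", "0", "mlp", "c_fc", "kernel") -> True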
def A__ ( A : List[str]):
'''simple docstring'''
def replace(A : Optional[int] , A : Dict):
for rule, replacement in rules:
if _match(_lowercase , _lowercase):
return replacement
return val
return replace
def A__ ( ):
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P("mp" , _lowercase)),
(("transformer", "wte", "embedding"), P("mp" , _lowercase)),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(_lowercase , "mp")),
(("attention", "out_proj", "kernel"), P("mp" , _lowercase)),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(_lowercase , "mp")),
(("mlp", "c_fc", "bias"), P("mp")),
(("mlp", "c_proj", "kernel"), P("mp" , _lowercase)),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def A__ ( A : Optional[Any]):
'''simple docstring'''
UpperCamelCase : Any = _get_partition_rules()
UpperCamelCase : List[str] = _replacement_rules(_lowercase)
UpperCamelCase : Dict = {k: _unmatched for k in flatten_dict(_lowercase)}
UpperCamelCase : Optional[Any] = {k: replace(_lowercase , _lowercase) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(_lowercase))
| 173 |
from __future__ import annotations
import math
__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if not all(isinstance(_lowercase , (float, int) ) for val in locals().values() ):
UpperCAmelCase_ : Optional[int] = f'''Input values must either be float or int: {list(locals().values() )}'''
raise TypeError(_lowercase )
UpperCAmelCase_ : Tuple = ((x * distance) / (z + distance)) * scale
UpperCAmelCase_ : str = ((y * distance) / (z + distance)) * scale
return projected_x, projected_y
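# Worked example (matches the __main__ call below): for (x, y, z) = (1, 2, 3)
# with distance = scale = 10, the perspective divide gives
# ((1 * 10) / (3 + 10)) * 10 ≈ 7.6923 and ((2 * 10) / (3 + 10)) * 10 ≈ 15.3846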
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
raise TypeError('''Axis must be a str''' )
UpperCAmelCase_ : Optional[Any] = locals()
del input_variables["axis"]
if not all(isinstance(_lowercase , (float, int) ) for val in input_variables.values() ):
UpperCAmelCase_ : List[Any] = (
'''Input values except axis must either be float or int: '''
f'''{list(input_variables.values() )}'''
)
raise TypeError(_lowercase )
UpperCAmelCase_ : Dict = (angle % 360) / 450 * 180 / math.pi
if axis == "z":
UpperCAmelCase_ : Optional[int] = x * math.cos(_lowercase ) - y * math.sin(_lowercase )
UpperCAmelCase_ : List[Any] = y * math.cos(_lowercase ) + x * math.sin(_lowercase )
UpperCAmelCase_ : Optional[int] = z
elif axis == "x":
UpperCAmelCase_ : Any = y * math.cos(_lowercase ) - z * math.sin(_lowercase )
UpperCAmelCase_ : int = z * math.cos(_lowercase ) + y * math.sin(_lowercase )
UpperCAmelCase_ : Dict = x
elif axis == "y":
UpperCAmelCase_ : Union[str, Any] = x * math.cos(_lowercase ) - z * math.sin(_lowercase )
UpperCAmelCase_ : Optional[int] = z * math.cos(_lowercase ) + x * math.sin(_lowercase )
UpperCAmelCase_ : Optional[int] = y
else:
raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE: Tuple = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE: Any = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE: Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 360 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__a = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__a = concatenate_datasets
__a = DownloadConfig
__a = DownloadManager
__a = DownloadMode
__a = DownloadConfig
__a = DownloadMode
__a = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 30 | 0 |
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def A__ ( A__ , A__ , A__ , A__ , ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = grid.shape
_UpperCAmelCase = [-1, 1, 0, 0]
_UpperCAmelCase = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
_UpperCAmelCase = [(0, source)], set()
_UpperCAmelCase = np.full((rows, cols) , np.inf )
_UpperCAmelCase = 0
_UpperCAmelCase = np.empty((rows, cols) , dtype=_lowercase )
_UpperCAmelCase = None
while queue:
(_UpperCAmelCase) = heappop(_lowercase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
_UpperCAmelCase = []
while (x, y) != source:
path.append((x, y) )
_UpperCAmelCase = predecessors[x, y]
path.append(_lowercase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_lowercase ) ):
_UpperCAmelCase = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
_UpperCAmelCase = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_lowercase , (dist + 1, (nx, ny)) )
_UpperCAmelCase = dist + 1
_UpperCAmelCase = (x, y)
return np.inf, []
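# Intended call shape (several tuple unpackings above were flattened in this
# dump; with the original names restored, a 2x2 grid of passable cells gives):
# >>> grid = np.array([[1, 1], [1, 1]])
# >>> dist, path = A__(grid, (0, 0), (1, 1), allow_diagonal=False)
# >>> dist, path
# (2.0, [(0, 0), (0, 1), (1, 1)])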
if __name__ == "__main__":
import doctest
doctest.testmod()
| 426 |
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
while a != 0:
UpperCAmelCase_, UpperCAmelCase_ : Optional[int] = b % a, a
return b
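# The helper above is Euclid's gcd; the function below uses the extended
# algorithm to find x with (a * x) % m == 1 (original names assumed to be
# greatest_common_divisor / mod_inverse). Example: mod_inverse(3, 11) == 4,
# since (3 * 4) % 11 == 1.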
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
if gcd(_lowercase , _lowercase ) != 1:
UpperCAmelCase_ : int = f'''mod inverse of {a!r} and {m!r} does not exist'''
raise ValueError(_lowercase )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = 1, 0, a
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Dict = 0, 1, m
while va != 0:
UpperCAmelCase_ : List[Any] = ua // va
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 30 | 0 |
'''simple docstring'''
__lowercase : str = 'Input must be a string of 8 numbers plus letter'
__lowercase : Union[str, Any] = 'TRWAGMYFPDXBNJZSQVHLCKE'
def lowerCamelCase (_SCREAMING_SNAKE_CASE : str ):
if not isinstance(_lowercase , _lowercase ):
__a : List[Any] = F"""Expected string as input, found {type(_lowercase ).__name__}"""
raise TypeError(_lowercase )
__a : Tuple = spanish_id.replace('-' , '' ).upper()
if len(_lowercase ) != 9:
raise ValueError(_lowercase )
try:
__a : str = int(spanish_id_clean[0:8] )
__a : Optional[int] = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_lowercase ) from ex
if letter.isdigit():
raise ValueError(_lowercase )
return letter == LOOKUP_LETTERS[number % 23]
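# Doctest-style example (the classic valid sample ID: 12345678 % 23 == 14 and
# LOOKUP_LETTERS[14] == "Z"):
# >>> lowerCamelCase("12345678Z")
# True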
if __name__ == "__main__":
import doctest
doctest.testmod()
| 476 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
lowerCAmelCase = '''CIDAS/clipseg-rd64-refined'''
lowerCAmelCase = '''image_segmenter'''
lowerCAmelCase = CLIPSegForImageSegmentation
lowerCAmelCase = ['''image''', '''text''']
lowerCAmelCase = ['''image''']
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> Dict:
requires_backends(self ,['''vision'''] )
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
return self.pre_processor(text=[label] ,images=[image] ,padding=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
with torch.no_grad():
UpperCAmelCase_ : Dict = self.model(**_SCREAMING_SNAKE_CASE ).logits
return logits
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ : Dict = outputs.cpu().detach().numpy()
        # binarize the logits into a mask: non-positive logits -> 0, positive -> 1
        # (the subscripted assignment targets were flattened in this dump)
        UpperCAmelCase_[UpperCAmelCase_ <= 0] = 0
        UpperCAmelCase_[UpperCAmelCase_ > 0] = 1
return Image.fromarray((array * 255).astype(np.uinta ) ) | 30 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 306 |
import numpy as np
import datasets
__a = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
__a = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
__a = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ),
} ) ,)
    def _compute( self ,X ,reference_distribution ) -> Any:
        # convert to numpy arrays
        X = np.array(X )
        reference_distribution = np.array(reference_distribution )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError('''Expected `X` to be a 2D vector''' )
if len(reference_distribution.shape ) != 2:
raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
if reference_distribution.shape[0] < 2:
raise ValueError(
'''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        covariance_matrix = np.cov(reference_distribution.T )
        try:
            inv_covariance_matrix = np.linalg.inv(covariance_matrix )
        except np.linalg.LinAlgError:
            inv_covariance_matrix = np.linalg.pinv(covariance_matrix )
        left_term = np.dot(X_minus_mu ,inv_covariance_matrix )
        mahal_dist = np.dot(left_term ,X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist} | 30 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Dict = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig( BackboneConfigMixin , PretrainedConfig ):
    '''simple docstring'''

    model_type = """nat"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1e-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs)
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1 , len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names)
| 226 |
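# Quick arithmetic check of the derived `hidden_size` above (pure Python, no
# transformers import needed): with the default embed_dim=64 and four stages,
# the channel dimension after the last stage is 64 * 2 ** 3 == 512.
if __name__ == "__main__":
    embed_dim, depths = 64, [3, 4, 6, 5]
    assert int(embed_dim * 2 ** (len(depths) - 1)) == 512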
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer( BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer( BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION


DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin:
"""simple docstring"""
    def __call__( self ,questions ,titles = None ,texts = None ,padding = False ,truncation = False ,max_length = None ,return_tensors = None ,return_attention_mask = None ,**kwargs ,) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions ,text_pair ,padding=padding ,truncation=truncation ,max_length=max_length ,return_tensors=return_tensors ,return_attention_mask=return_attention_mask ,**kwargs ,)
        titles = titles if not isinstance(titles ,str ) else [titles]
        texts = texts if not isinstance(texts ,str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions ,str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f'''There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.''' )
        encoded_question_and_titles = super().__call__(questions ,titles ,padding=False ,truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts ,add_special_tokens=False ,padding=False ,truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles ,encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs ,padding=padding ,max_length=max_length ,return_tensors=return_tensors )
    def decode_best_spans( self ,reader_input ,reader_output ,num_spans = 16 ,max_answer_length = 64 ,num_spans_per_passage = 4 ,) -> List[DPRSpanPrediction]:
        input_ids = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) ,reverse=True ,key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id ,2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=max_answer_length ,top_spans=num_spans_per_passage ,)
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=doc_id ,start_index=start_index ,end_index=end_index ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self ,start_logits ,end_logits ,max_answer_length ,top_spans ,) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores ,key=lambda x: x[1] ,reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer( CustomDPRReaderTokenizerMixin , BertTokenizer ):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask'''] | 30 | 0 |
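# Minimal standalone illustration of the span scoring inside _get_best_spans
# above: every span up to max_answer_length is scored as start_logit + end_logit
# and the best-scoring span wins (the logit values here are made up):
if __name__ == "__main__":
    start_logits = [0.1, 2.0, 0.3]
    end_logits = [0.2, 0.1, 1.5]
    max_answer_length = 2
    spans = [
        ((start, start + length), start_score + end_score)
        for start, start_score in enumerate(start_logits)
        for length, end_score in enumerate(end_logits[start : start + max_answer_length])
    ]
    print(max(spans, key=lambda item: item[1]))  # ((1, 2), 3.5)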
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    '''Return the alphabetically sorted letters of `word` (its anagram key).'''
    return "".join(sorted(word ) )


def anagram(my_word: str) -> list[str]:
    '''Return every known word sharing the signature of `my_word`.'''
    return word_by_signature[signature(my_word )]
__a = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
__a = sorted({word.strip().lower() for word in data.splitlines()})
__a = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__a = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
| 97 |
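# Tiny in-memory demo of the signature/anagram grouping above, independent of
# words.txt (the word list here is illustrative):
import collections

if __name__ == "__main__":
    demo_words = ["post", "stop", "pots", "cat"]
    demo_index = collections.defaultdict(list)
    for demo_word in demo_words:
        demo_index["".join(sorted(demo_word))].append(demo_word)
    print(demo_index["opst"])  # ['post', 'stop', 'pots']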
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 0 |
'''simple docstring'''
import operator as op
SCREAMING_SNAKE_CASE = """scaler.pt"""
SCREAMING_SNAKE_CASE = """pytorch_model"""
SCREAMING_SNAKE_CASE = """random_states"""
SCREAMING_SNAKE_CASE = """optimizer"""
SCREAMING_SNAKE_CASE = """scheduler"""
SCREAMING_SNAKE_CASE = """pytorch_model.bin"""
SCREAMING_SNAKE_CASE = """pytorch_model.bin.index.json"""
SCREAMING_SNAKE_CASE = """model.safetensors"""
SCREAMING_SNAKE_CASE = """model.safetensors.index.json"""
SCREAMING_SNAKE_CASE = """1.10.2"""
SCREAMING_SNAKE_CASE = """py38"""
SCREAMING_SNAKE_CASE = """4.17.0"""
SCREAMING_SNAKE_CASE = ["""ml.p3.16xlarge""", """ml.p3dn.24xlarge""", """ml.p4dn.24xlarge"""]
SCREAMING_SNAKE_CASE = ["""FULL_SHARD""", """SHARD_GRAD_OP""", """NO_SHARD""", """HYBRID_SHARD""", """HYBRID_SHARD_ZERO2"""]
SCREAMING_SNAKE_CASE = ["""TRANSFORMER_BASED_WRAP""", """SIZE_BASED_WRAP""", """NO_WRAP"""]
SCREAMING_SNAKE_CASE = ["""BACKWARD_PRE""", """BACKWARD_POST""", """NO_PREFETCH"""]
SCREAMING_SNAKE_CASE = ["""FULL_STATE_DICT""", """LOCAL_STATE_DICT""", """SHARDED_STATE_DICT"""]
SCREAMING_SNAKE_CASE = """2.0.1"""
SCREAMING_SNAKE_CASE = ["""pdsh""", """standard""", """openmpi""", """mvapich"""]
SCREAMING_SNAKE_CASE = ["""default""", """reduce-overhead""", """max-autotune"""]
SCREAMING_SNAKE_CASE = {""">""": op.gt, """>=""": op.ge, """==""": op.eq, """!=""": op.ne, """<=""": op.le, """<""": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
SCREAMING_SNAKE_CASE = [
"""nnodes""",
"""nproc_per_node""",
"""rdzv_backend""",
"""rdzv_endpoint""",
"""rdzv_id""",
"""rdzv_conf""",
"""standalone""",
"""max_restarts""",
"""monitor_interval""",
"""start_method""",
"""role""",
"""module""",
"""m""",
"""no_python""",
"""run_path""",
"""log_dir""",
"""r""",
"""redirects""",
"""t""",
"""tee""",
"""node_rank""",
"""master_addr""",
"""master_port""",
]
SCREAMING_SNAKE_CASE = ["""DEEPSPEED""", """MULTI_GPU""", """FSDP""", """MEGATRON_LM"""]
SCREAMING_SNAKE_CASE = ["""DEEPSPEED""", """MULTI_XPU""", """FSDP"""]
| 199 |
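# Sketch of how the operator map above is typically consumed for version
# comparisons (the helper name `compare_versions` is illustrative, not from
# the source; only the `packaging` package is required):
if __name__ == "__main__":
    from packaging import version

    def compare_versions(current: str, operation: str, required: str) -> bool:
        return STR_OPERATION_TO_FUNC[operation](version.parse(current), version.parse(required))

    print(compare_versions("2.1.0", ">=", FSDP_PYTORCH_VERSION))  # True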
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config( PretrainedConfig ):
    """simple docstring"""
    model_type = '''wav2vec2'''
    def __init__( self ,vocab_size=32 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3_072 ,hidden_act="gelu" ,hidden_dropout=0.1 ,activation_dropout=0.1 ,attention_dropout=0.1 ,feat_proj_dropout=0.0 ,feat_quantizer_dropout=0.0 ,final_dropout=0.1 ,layerdrop=0.1 ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,feat_extract_norm="group" ,feat_extract_activation="gelu" ,conv_dim=(512, 512, 512, 512, 512, 512, 512) ,conv_stride=(5, 2, 2, 2, 2, 2, 2) ,conv_kernel=(10, 3, 3, 3, 3, 2, 2) ,conv_bias=False ,num_conv_pos_embeddings=128 ,num_conv_pos_embedding_groups=16 ,do_stable_layer_norm=False ,apply_spec_augment=True ,mask_time_prob=0.05 ,mask_time_length=10 ,mask_time_min_masks=2 ,mask_feature_prob=0.0 ,mask_feature_length=10 ,mask_feature_min_masks=0 ,num_codevectors_per_group=320 ,num_codevector_groups=2 ,contrastive_logits_temperature=0.1 ,num_negatives=100 ,codevector_dim=256 ,proj_codevector_dim=256 ,diversity_loss_weight=0.1 ,ctc_loss_reduction="sum" ,ctc_zero_infinity=False ,use_weighted_layer_sum=False ,classifier_proj_size=256 ,tdnn_dim=(512, 512, 512, 512, 1_500) ,tdnn_kernel=(5, 3, 3, 1, 1) ,tdnn_dilation=(1, 2, 3, 1, 1) ,xvector_output_dim=512 ,pad_token_id=0 ,bos_token_id=1 ,eos_token_id=2 ,add_adapter=False ,adapter_kernel_size=3 ,adapter_stride=2 ,num_adapter_layers=3 ,output_hidden_size=None ,adapter_attn_dim=None ,**kwargs ,) -> Optional[int]:
        super().__init__(**kwargs ,pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio( self ) -> Any:
        return functools.reduce(operator.mul ,self.conv_stride ,1 ) | 30 | 0 |
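# The `inputs_to_logits_ratio` property above multiplies the feature-extractor
# strides together; with the default conv_stride of (5, 2, 2, 2, 2, 2, 2) one
# output frame covers 5 * 2 ** 6 == 320 raw samples (20 ms of 16 kHz audio):
if __name__ == "__main__":
    import functools
    import operator

    assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320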
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = ["does this sentence match??",\n ... "what about this sentence?",\n ... "What did the TER metric user say to the developer?"]\n >>> references = [["does this sentence match", "does this sentence match!?!"],\n ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n ... ["Your jokes are...", "...TERrible"]]\n >>> ter = datasets.load_metric("ter")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter( datasets.Metric ):
    '''simple docstring'''
    def _info( self ) -> Tuple:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute( self , predictions , references , normalized = False , ignore_punct = False , support_zh_ja_chars = False , case_sensitive = False , ) -> Optional[Any]:
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 696 |
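# TER's headline score is just cumulative edits over cumulative reference
# length, scaled to percent; reproducing docstring Example 1 above:
if __name__ == "__main__":
    num_edits, ref_length = 15, 10.0
    print(num_edits / ref_length * 100)  # 150.0, matching the example output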
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 30 | 0 |
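# The _LazyModule pattern above defers heavy imports until first attribute
# access; a minimal standalone analogue using only importlib (independent of
# the transformers implementation):
import importlib


class _LazyDemo:
    def __init__(self, attr_to_module):  # e.g. {"sqrt": "math"}
        self._attr_to_module = attr_to_module

    def __getattr__(self, name):
        # only called for attributes not found normally, so the import
        # happens on first access rather than at definition time
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)


if __name__ == "__main__":
    lazy = _LazyDemo({"sqrt": "math"})
    print(lazy.sqrt(9.0))  # "math" is imported only on this first access -> 3.0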
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 550 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']


class TokenizedDataset( IterableDataset ):
    """Tokenize and preprocess the dataset, yielding each task prompt n_copies times."""
    def __init__( self ,tokenizer ,dataset ,n_tasks=None ,n_copies=1 ) -> Dict:
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__( self ) -> Any:
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
        outputs = self.tokenizer(prompts ,padding=True ,return_tensors='''pt''' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria( StoppingCriteria ):
    """Custom `StoppingCriteria` that fires once every generation in the batch contains an end-of-function string."""
    def __init__( self ,start_length ,eof_strings ,tokenizer ) -> List[Any]:
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self ,input_ids ,scores ,**kwargs ) -> List[Any]:
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )


def remove_last_block( string ):
    '''Remove the last block of the generation, starting at the final EOF string.'''
    string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    '''Generate multiple completions for each task in the dataset, distributed across devices with accelerate.'''
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs['''stopping_criteria'''][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    '''simple docstring'''
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = '''false'''
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('''openai_humaneval''' )
    code_eval_metric = load_metric('''code_eval''' )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['''test'''] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''' )
        raise exception
    model, human_eval_loader = accelerator.prepare(model , human_eval_loader )
    code_gens = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = f'''check({human_eval["test"][task]["entry_point"]})'''
            references.append('''\n''' + test_func + '''\n''' + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references , predictions=code_gens , num_workers=args.num_workers )
        print(f'''Results: {pass_at_k}''' )
        # Save results to json file
        with open(args.output_file , '''w''' ) as fp:
            json.dump(pass_at_k , fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 30 | 0 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer( nn.Module ):
    def __init__( self , in_channels , out_channels , kernel_size = 3 , stride = 1 , groups = 1 , activation = "relu" , ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , groups=groups , bias=False , )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward( self , hidden_state ):
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
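# Shape sanity check for the layer above (a sketch; requires torch plus the
# package this module lives in, since ACT2FN comes from its activations file):
# a 3->32 channel layer at stride 2 with kernel 3 and padding 1 halves 64x64.
if __name__ == "__main__":
    demo_layer = RegNetConvLayer(3, 32, kernel_size=3, stride=2)
    print(demo_layer(torch.randn(1, 3, 64, 64)).shape)  # torch.Size([1, 32, 32, 32])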
class RegNetEmbeddings( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
        self.num_channels = config.num_channels

    def forward( self , pixel_values ):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        hidden_state = self.embedder(pixel_values )
        return hidden_state


class RegNetShortCut( nn.Module ):
    def __init__( self , in_channels , out_channels , stride = 2 ):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state


class RegNetSELayer( nn.Module ):
    def __init__( self , in_channels , reduced_channels ):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels , reduced_channels , kernel_size=1 ) , nn.ReLU() , nn.Conv2d(reduced_channels , in_channels , kernel_size=1 ) , nn.Sigmoid() , )

    def forward( self , hidden_state ):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state )
        attention = self.attention(pooled )
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer( nn.Module ):
    def __init__( self , config , in_channels , out_channels , stride = 1 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[config.hidden_act]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state


class RegNetYLayer( nn.Module ):
    def __init__( self , config , in_channels , out_channels , stride = 1 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels , out_channels , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(out_channels , out_channels , stride=stride , groups=groups , activation=config.hidden_act ) , RegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(out_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[config.hidden_act]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state


class RegNetStage( nn.Module ):
    def __init__( self , config , in_channels , out_channels , stride = 2 , depth = 2 , ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config , in_channels , out_channels , stride=stride , ) , *[layer(config , out_channels , out_channels ) for _ in range(depth - 1 )] , )

    def forward( self , hidden_state ):
        hidden_state = self.layers(hidden_state )
        return hidden_state
class RegNetEncoder( nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(RegNetStage(config , in_channels , out_channels , depth=depth ) )

    def forward( self , hidden_state , output_hidden_states = False , return_dict = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )


class RegNetPreTrainedModel( PreTrainedModel ):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def _set_gradient_checkpointing( self , module , value = False ):
        if isinstance(module , RegNetModel ):
            module.gradient_checkpointing = value
A_ = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
A_ = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , _a , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class lowercase_ ( _a ):
def __init__( self : Optional[int] , __lowerCamelCase : Optional[int] ):
super().__init__(_SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = config
snake_case__ : List[Any] = RegNetEmbeddings(_SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = RegNetEncoder(_SCREAMING_SNAKE_CASE )
snake_case__ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : str = None , __lowerCamelCase : Dict = None ):
snake_case__ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case__ : Any = return_dict if return_dict is not None else self.config.use_return_dict
snake_case__ : int = self.embedder(_SCREAMING_SNAKE_CASE )
snake_case__ : int = self.encoder(
_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
snake_case__ : str = encoder_outputs[0]
snake_case__ : Optional[int] = self.pooler(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE , pooler_output=_SCREAMING_SNAKE_CASE , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
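# NOTE (editor): minimal usage sketch, added for illustration only (not part of the
# original file). It builds a randomly initialised model from a config so it runs
# offline; swap in `from_pretrained("facebook/regnet-y-040")` for real weights.
#
#     config = RegNetConfig(num_labels=10)
#     model = RegNetForImageClassification(config)
#     pixel_values = torch.randn(1, config.num_channels, 224, 224)
#     with torch.no_grad():
#         logits = model(pixel_values).logits  # shape: (1, 10)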
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}
class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )
    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
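# NOTE (editor): short usage sketch, added for illustration only (not part of the
# original module).
#
#     config = ImageGPTConfig(n_embd=256, n_layer=6, n_head=8)
#     onnx_config = ImageGPTOnnxConfig(config)
#     list(onnx_config.inputs)  # -> ["input_ids"]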
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head for transformer encoders."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        logits = self.mlp(hidden_state)
        return logits
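# NOTE (editor): minimal usage sketch added for illustration; not part of the original file.
if __name__ == "__main__":
    import torch

    head = ClassificationHead(class_size=2, embed_size=768)
    dummy_hidden_state = torch.randn(4, 768)
    print(head(dummy_hidden_state).shape)  # torch.Size([4, 2])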
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weight names to the transformers naming scheme."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
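# NOTE (editor): quick illustration of the helper above, added for clarity (the values
# follow from the regex on the dtype's string form):
#     get_dtype_size(torch.float16) == 2
#     get_dtype_size(torch.int64) == 8
#     get_dtype_size(torch.bool) == 0.125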
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None

        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-DeepSpeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-DeepSpeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP ranks the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
    )
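# NOTE (editor): illustrative programmatic invocation with placeholder paths, added
# for clarity; not part of the original script.
#
#     convert_bloom_checkpoint_to_pytorch(
#         bloom_checkpoint_path="/path/to/megatron_checkpoint",
#         bloom_config_file="",
#         pytorch_dump_folder_path="/path/to/output",
#         shard_model=True,
#         pretraining_tp=4,
#     )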
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
def solution():
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
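# NOTE (editor): an equivalent sketch added for illustration; it avoids the huge
# intermediate integer by summing modulo 10**10 with three-argument pow().
def solution_mod() -> str:
    """Return the last ten digits of 1^1 + 2^2 + ... + 1000^1000, with leading zeros."""
    mod = 10**10
    total = sum(pow(i, i, mod) for i in range(1, 1001)) % mod
    return str(total).zfill(10)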
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a random PIL image input."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def __A ( self ) -> List[str]:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Any:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
_UpperCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> Dict:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(_SCREAMING_SNAKE_CASE , return_tensors="np" )
_UpperCAmelCase = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self ) -> List[Any]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''test'''
_UpperCAmelCase = processor(text=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ) -> List[str]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = '''test'''
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __A ( self ) -> List[str]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.char_decode(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A ( self ) -> List[Any]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = None
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __A ( self ) -> Optional[Any]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = MgpstrProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.randn(1 , 27 , 38 )
_UpperCAmelCase = torch.randn(1 , 27 , 50257 )
_UpperCAmelCase = torch.randn(1 , 27 , 30522 )
_UpperCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
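# NOTE (editor): minimal usage sketch based only on the APIs exercised in the tests
# above; added for illustration, not part of the original test file.
#
#     processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
#     batch = processor(text="test", images=pil_image)   # keys: pixel_values, labels
#     decoded = processor.char_decode([[1, 4, 5, 8, 1, 0, 8]])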
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__a = None
__a = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__a = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "PIL.Image.Image"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
def __call__( self ) -> Tuple:
return self.pa_type
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Optional[Any] = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Dict = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase_ : Dict = storage.field('''bytes''' )
else:
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase_ : int = storage.field('''path''' )
else:
UpperCAmelCase_ : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Dict = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : Any = f.read()
return bytes_
UpperCAmelCase_ : Union[str, Any] = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : List[str] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def lowerCamelCase__ ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase_ : int = image.format
else:
UpperCAmelCase_ : List[Any] = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(_lowercase , format=_lowercase )
return buffer.getvalue()
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if hasattr(_lowercase , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
UpperCAmelCase_ : Tuple = array.dtype
UpperCAmelCase_ : List[str] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
UpperCAmelCase_ : Dict = dtype.kind
UpperCAmelCase_ : Union[str, Any] = dtype.itemsize
UpperCAmelCase_ : Optional[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase_ : Tuple = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase_ : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase_ : Union[str, Any] = dtype_byteorder + dtype_kind + str(_lowercase )
UpperCAmelCase_ : str = np.dtype(_lowercase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCAmelCase_ : Any = PIL.Image.fromarray(array.astype(_lowercase ) )
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCAmelCase_, UpperCAmelCase_ : Tuple = first_non_null_value(_lowercase )
if isinstance(_lowercase , _lowercase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_lowercase , np.ndarray ):
UpperCAmelCase_ : Any = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
elif isinstance(_lowercase , PIL.Image.Image ):
UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
else:
return objs
else:
        return objs
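# NOTE (editor): minimal usage sketch of the feature defined above, added for
# illustration; it relies on the public `datasets` API (cast_column + lazy decoding).
#
#     from datasets import Dataset, Image
#     ds = Dataset.from_dict({"image": ["path/to/a.png"]}).cast_column("image", Image())
#     ds[0]["image"]  # decoded lazily into a PIL.Image.Image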
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_from_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
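# NOTE (editor): hypothetical sketch of how the mixin above is typically consumed;
# the concrete tool id below is an assumption, not taken from this file.
#
#     class TextClassificationToolTest(ToolTesterMixin, unittest.TestCase):
#         def setUp(self):
#             self.tool = load_tool("text-classification")  # hypothetical tool id
#             self.tool.setup()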
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
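# NOTE (editor): illustration added for clarity, not part of the original file —
# constructing the shim emits the FutureWarning above and otherwise behaves exactly
# like CLIPImageProcessor.
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         CLIPFeatureExtractor()
#         assert any(issubclass(w.category, FutureWarning) for w in caught)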
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Get the Next Greatest Element (NGE) for each element in O(n^2) with index loops."""
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Like next_greatest_element_slow(), but uses enumerate() and slicing."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """Get the NGE for each element in O(n) using a monotonic stack."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
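# NOTE (editor): quick self-check added for illustration; `arr` and `expect` are the
# module-level lists defined at the top of this file.
#
#     assert next_greatest_element_slow(arr) == expect
#     assert next_greatest_element_fast(arr) == expect
#     assert next_greatest_element(arr) == expect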
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} )
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
            ) ,)
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False  # second flag name is an assumption; the dump anonymized it
    def setUp(self) -> None:
        self.model_tester = FlaxBigBirdModelTester(self)
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''google/bigbird-roberta-base''')
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest('''JIT Enabled'''):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest('''JIT Disabled'''):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name='''outputs''', attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while the
        # PyTorch version makes an effort to return `attention_probs` (yet to be verified).
if name.startswith('''outputs.attentions''' ):
return
else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
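
For context, the jit-versus-eager comparison used in test_jit_compilation can be reproduced standalone; a minimal sketch with a toy function (not part of the test suite):

import jax
import jax.numpy as jnp

def toy_fn(x):
    return jnp.tanh(x) * 2.0

jitted_fn = jax.jit(toy_fn)
x = jnp.ones((2, 3))
with jax.disable_jit():
    eager_out = jitted_fn(x)  # with jit disabled, the traced function runs eagerly
assert jnp.allclose(jitted_fn(x), eager_out)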
| 97 |
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray = None,
) -> np.ndarray:
    '''simple docstring'''
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            '''Expected the same number of rows for A and B. '''
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            '''Expected the same number of columns for B and C. '''
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )

    return mat_c - mat_b.T @ a_inv @ mat_b
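
The identity the tests below verify: for the block matrix M = [[A, B], [B^T, C]] with invertible A, det(M) = det(A) * det(S), where S = C - B^T A^{-1} B is the Schur complement. A quick numeric check with the same matrices as the first test:

a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
b = np.array([[0, 3], [3, 0], [2, 3]])
c = np.array([[2, 1], [6, 3]])
s = schur_complement(a, b, c)
m = np.block([[a, b], [b.T, c]])
assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))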
class TestSchurComplement(unittest.TestCase):
    """simple docstring"""
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # argument order reconstructed so the dimension check fires; the
        # obfuscated source lost which matrix went where
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 30 | 0 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
'''simple docstring'''
def __init__( self : Tuple , snake_case__ : Optional[int] , snake_case__ : Union[str, Any]=13 , snake_case__ : Any=30 , snake_case__ : int=2 , snake_case__ : Union[str, Any]=3 , snake_case__ : int=True , snake_case__ : Tuple=True , snake_case__ : List[Any]=32 , snake_case__ : str=5 , snake_case__ : Optional[int]=4 , snake_case__ : List[str]=37 , snake_case__ : str="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[str]=10 , snake_case__ : Any=0.02 , snake_case__ : Union[str, Any]=3 , snake_case__ : Dict=0.6 , snake_case__ : Any=None , ):
'''simple docstring'''
UpperCAmelCase__ : str = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : Optional[int] = image_size
UpperCAmelCase__ : Tuple = patch_size
UpperCAmelCase__ : Dict = num_channels
UpperCAmelCase__ : List[Any] = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Optional[int] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : Optional[Any] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : str = hidden_dropout_prob
UpperCAmelCase__ : Dict = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = initializer_range
UpperCAmelCase__ : Dict = mask_ratio
UpperCAmelCase__ : Optional[Any] = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
UpperCAmelCase__ : Union[str, Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
    def get_config(self):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
    def create_and_check_model(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        '''simple docstring'''
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
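        # Shape sanity check: for the tester defaults (image_size=30, patch_size=2, num_channels=3)
        # num_patches = (30 // 2) ** 2 = 225 and each patch decodes to 2 ** 2 * 3 = 12 values,
        # so the pretraining logits above have shape (batch_size, 225, 12).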
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    # the four boolean flag names below are assumptions; the dump anonymized them
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)  # has_text_modality=False is an assumption
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Any = model_class(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase__ : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : int , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Tuple ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase__ : List[Any] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase__ : Any = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase__ : Union[str, Any] = pt_noise
super().check_pt_tf_models(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : List[str] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : Dict = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
UpperCAmelCase__ : List[str] = outputs[0].cpu().numpy()
UpperCAmelCase__ : str = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Any = model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase__ : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# Make sure we don't have nans
UpperCAmelCase__ : Tuple = after_outputs[0].cpu().numpy()
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCamelCase ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def UpperCamelCase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
pass
@slow
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Tuple = ViTMAEModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def snake_case_ ( ):
UpperCAmelCase__ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
np.random.seed(2 )
UpperCAmelCase__ : List[str] = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" ).to(_SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : int = self.default_image_processor
UpperCAmelCase__ : str = prepare_img()
UpperCAmelCase__ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(_SCREAMING_SNAKE_CASE )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
UpperCAmelCase__ : Any = ViTMAEConfig()
UpperCAmelCase__ : Any = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
UpperCAmelCase__ : Optional[Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
UpperCAmelCase__ : int = model(**_SCREAMING_SNAKE_CASE , noise=torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ) )
# verify the logits
UpperCAmelCase__ : Tuple = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(_SCREAMING_SNAKE_CASE ) , atol=1e-4 ) )
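        # Arithmetic note: the (1, 196, 768) logits shape matches ViT-Base geometry:
        # (224 // 16) ** 2 = 196 patches, and 16 ** 2 * 3 = 768 reconstructed pixel values per patch.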
| 199 |
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    '''simple docstring'''
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f'''a bytes-like object is required, not '{data.__class__.__name__}\''''
        raise TypeError(msg)

    binary_stream = ''''''.join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = B'''=''' * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = B''''''

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    '''simple docstring'''
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            '''argument should be a bytes-like object or ASCII string, '''
            f'''not '{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode('''utf-8''')
        except UnicodeDecodeError:
            raise ValueError('''base64 encoded data should only contain ASCII characters''')

    padding = encoded_data.count('''=''')

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 | 0 |
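
A quick round-trip check of the two functions above (the expected string was verified against the standard library's base64 module):

import base64
payload = b'Hello World!'
assert base64_encode(payload) == base64.b64encode(payload)  # b'SGVsbG8gV29ybGQh'
assert base64_decode('SGVsbG8gV29ybGQh') == payload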
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Standard biquad low-pass coefficients (RBJ Audio EQ Cookbook)
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
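
A minimal usage sketch; it assumes IIRFilter exposes a per-sample process() method, as in the iir_filter module this file imports from:

filt = make_lowpass(1000, 48000)  # 2nd-order low-pass at 1 kHz for 48 kHz audio
smoothed = [filt.process(sample) for sample in (0.0, 1.0, 1.0, 1.0)]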
| 696 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter
            # for our new tokenizer, and that model does not ship a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
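    # The registration pattern exercised above, reduced to its essentials (using the classes
    # imported at the top of this file):
    #   AutoConfig.register("custom", CustomConfig)
    #   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer,
    #                          fast_tokenizer_class=CustomTokenizerFast)
    # After registration, AutoTokenizer.from_pretrained() resolves any checkpoint whose
    # config is a CustomConfig to the registered tokenizer classes.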
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
        class NewTokenizer(BertTokenizer):
            """simple docstring"""
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            """simple docstring"""
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
UpperCAmelCase_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 30 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class A__ :
"""simple docstring"""
def __init__( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=0.2 , __snake_case=0.2 ):
snake_case = bp_numa
snake_case = bp_numa
snake_case = bp_numa
snake_case = conva_get[:2]
snake_case = conva_get[2]
snake_case = size_pa
snake_case = rate_w
snake_case = rate_t
snake_case = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
snake_case = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
snake_case = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
snake_case = -2 * np.random.rand(self.conva[1] ) + 1
snake_case = -2 * np.random.rand(self.num_bpa ) + 1
snake_case = -2 * np.random.rand(self.num_bpa ) + 1
def a_ ( self , __snake_case ):
# save model dict with pickle
snake_case = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as f:
pickle.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print(F'''Model saved: {save_path}''' )
@classmethod
def a_ ( cls , __snake_case ):
# read saved model
with open(_SCREAMING_SNAKE_CASE , '''rb''' ) as f:
snake_case = pickle.load(_SCREAMING_SNAKE_CASE ) # noqa: S301
snake_case = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
snake_case = model_dic.get('''size_pooling1''' )
snake_case = model_dic.get('''num_bp1''' )
snake_case = model_dic.get('''num_bp2''' )
snake_case = model_dic.get('''num_bp3''' )
snake_case = model_dic.get('''rate_weight''' )
snake_case = model_dic.get('''rate_thre''' )
# create model instance
snake_case = CNN(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# modify model parameter
snake_case = model_dic.get('''w_conv1''' )
snake_case = model_dic.get('''wkj''' )
snake_case = model_dic.get('''vji''' )
snake_case = model_dic.get('''thre_conv1''' )
snake_case = model_dic.get('''thre_bp2''' )
snake_case = model_dic.get('''thre_bp3''' )
return conv_ins
def a_ ( self , __snake_case ):
return 1 / (1 + np.exp(-1 * x ))
def a_ ( self , __snake_case ):
return round(_SCREAMING_SNAKE_CASE , 3 )
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
# convolution process
snake_case = convs[0]
snake_case = convs[1]
snake_case = np.shape(_SCREAMING_SNAKE_CASE )[0]
# get the data slice of original image data, data_focus
snake_case = []
for i_focus in range(0 , size_data - size_conv + 1 , _SCREAMING_SNAKE_CASE ):
for j_focus in range(0 , size_data - size_conv + 1 , _SCREAMING_SNAKE_CASE ):
snake_case = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_SCREAMING_SNAKE_CASE )
# calculate the feature map of every single kernel, and saved as list of matrix
snake_case = []
snake_case = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_SCREAMING_SNAKE_CASE ):
snake_case = []
for i_focus in range(len(_SCREAMING_SNAKE_CASE ) ):
snake_case = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_SCREAMING_SNAKE_CASE ) )
snake_case = np.asmatrix(_SCREAMING_SNAKE_CASE ).reshape(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
data_featuremap.append(_SCREAMING_SNAKE_CASE )
        # expand the data slice to one dimension
snake_case = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_SCREAMING_SNAKE_CASE ) )
snake_case = np.asarray(_SCREAMING_SNAKE_CASE )
return focus_list, data_featuremap
def a_ ( self , __snake_case , __snake_case , __snake_case="average_pool" ):
# pooling process
snake_case = len(featuremaps[0] )
snake_case = int(size_map / size_pooling )
snake_case = []
for i_map in range(len(_SCREAMING_SNAKE_CASE ) ):
snake_case = featuremaps[i_map]
snake_case = []
for i_focus in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for j_focus in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_SCREAMING_SNAKE_CASE ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_SCREAMING_SNAKE_CASE ) )
snake_case = np.asmatrix(_SCREAMING_SNAKE_CASE ).reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
featuremap_pooled.append(_SCREAMING_SNAKE_CASE )
return featuremap_pooled
def a_ ( self , __snake_case ):
# expanding three dimension data to one dimension list
snake_case = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
snake_case = np.shape(data[i] )
snake_case = data[i].reshape(1 , shapes[0] * shapes[1] )
snake_case = data_listed.getA().tolist()[0]
data_expanded.extend(_SCREAMING_SNAKE_CASE )
snake_case = np.asarray(_SCREAMING_SNAKE_CASE )
return data_expanded
def a_ ( self , __snake_case ):
# expanding matrix to one dimension list
snake_case = np.asarray(_SCREAMING_SNAKE_CASE )
snake_case = np.shape(_SCREAMING_SNAKE_CASE )
snake_case = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ):
snake_case = []
snake_case = 0
for i_map in range(_SCREAMING_SNAKE_CASE ):
snake_case = np.ones((size_map, size_map) )
for i in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for j in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case = pd_pool[
i_pool
]
snake_case = i_pool + 1
snake_case = np.multiply(
_SCREAMING_SNAKE_CASE , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_SCREAMING_SNAKE_CASE )
return pd_all
def a_ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_SCREAMING_SNAKE_CASE )) )
print((''' - - Shape: Teach_Data ''', np.shape(_SCREAMING_SNAKE_CASE )) )
snake_case = 0
snake_case = []
snake_case = 1_0_0_0_0
while rp < n_repeat and mse >= error_accuracy:
snake_case = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(_SCREAMING_SNAKE_CASE ) ):
# print('------------Learning Image: %d--------------'%p)
snake_case = np.asmatrix(datas_train[p] )
snake_case = np.asarray(datas_teach[p] )
snake_case = self.convolute(
_SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
snake_case = self.pooling(_SCREAMING_SNAKE_CASE , self.size_poolinga )
snake_case = np.shape(_SCREAMING_SNAKE_CASE )
snake_case = self._expand(_SCREAMING_SNAKE_CASE )
snake_case = data_bp_input
snake_case = np.dot(_SCREAMING_SNAKE_CASE , self.vji.T ) - self.thre_bpa
snake_case = self.sig(_SCREAMING_SNAKE_CASE )
snake_case = np.dot(_SCREAMING_SNAKE_CASE , self.wkj.T ) - self.thre_bpa
snake_case = self.sig(_SCREAMING_SNAKE_CASE )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
snake_case = np.multiply(
(data_teach - bp_outa) , np.multiply(_SCREAMING_SNAKE_CASE , (1 - bp_outa) ) )
snake_case = np.multiply(
np.dot(_SCREAMING_SNAKE_CASE , self.wkj ) , np.multiply(_SCREAMING_SNAKE_CASE , (1 - bp_outa) ) )
snake_case = np.dot(_SCREAMING_SNAKE_CASE , self.vji )
snake_case = pd_i_all / (self.size_poolinga * self.size_poolinga)
snake_case = pd_conva_pooled.T.getA().tolist()
snake_case = self._calculate_gradient_from_pool(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
snake_case = self._expand_mat(pd_conva_all[k_conv] )
snake_case = self.rate_weight * np.dot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
snake_case = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
snake_case = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
snake_case = self.vji + pd_j_all.T * bp_outa * self.rate_weight
snake_case = self.thre_bpa - pd_k_all * self.rate_thre
snake_case = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
snake_case = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
snake_case = rp + 1
snake_case = error_count / patterns
all_mse.append(_SCREAMING_SNAKE_CASE )
def draw_error():
snake_case = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_SCREAMING_SNAKE_CASE , '''+-''' )
plt.plot(_SCREAMING_SNAKE_CASE , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_SCREAMING_SNAKE_CASE , alpha=0.5 )
plt.show()
        print('''------------------Training Complete---------------------''' )
print((''' - - Training epoch: ''', rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def a_ ( self , __snake_case ):
# model predict
snake_case = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_SCREAMING_SNAKE_CASE )) )
for p in range(len(_SCREAMING_SNAKE_CASE ) ):
snake_case = np.asmatrix(datas_test[p] )
snake_case = self.convolute(
_SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
snake_case = self.pooling(_SCREAMING_SNAKE_CASE , self.size_poolinga )
snake_case = self._expand(_SCREAMING_SNAKE_CASE )
snake_case = data_bp_input
snake_case = bp_outa * self.vji.T - self.thre_bpa
snake_case = self.sig(_SCREAMING_SNAKE_CASE )
snake_case = bp_outa * self.wkj.T - self.thre_bpa
snake_case = self.sig(_SCREAMING_SNAKE_CASE )
produce_out.extend(bp_outa.getA().tolist() )
snake_case = [list(map(self.do_round , _SCREAMING_SNAKE_CASE ) ) for each in produce_out]
return np.asarray(_SCREAMING_SNAKE_CASE )
def a_ ( self , __snake_case ):
# return the data of image after convoluting process so we can check it out
snake_case = np.asmatrix(_SCREAMING_SNAKE_CASE )
snake_case = self.convolute(
_SCREAMING_SNAKE_CASE , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
snake_case = self.pooling(_SCREAMING_SNAKE_CASE , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
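    # Hedged usage sketch: the parameter and method names below follow what the obfuscated
    # bodies above implement (train/predict); all shapes and hyperparameters are illustrative only.
    #   cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=423, bp_num2=30, bp_num3=10)
    #   cnn.train(patterns, datas_train, datas_teach, n_repeat=100, error_accuracy=0.5, draw_e=True)
    #   predictions = cnn.predict(datas_test)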
| 550 |
from functools import reduce
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n=__a):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
    print(F"""{solution() = }""") | 30 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase_ ( TokenizerTesterMixin , unittest.TestCase ):
A_ = GPTaTokenizer
A_ = GPTaTokenizerFast
A_ = True
A_ = {"add_prefix_space": True}
A_ = False
def _lowerCAmelCase ( self : Tuple ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : Optional[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
snake_case__ : Optional[Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
snake_case__ : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case__ : Optional[int] = {'''unk_token''': '''<unk>'''}
snake_case__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
    def _lowerCAmelCase ( self : List[str] , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def _lowerCAmelCase ( self : Tuple , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def _lowerCAmelCase ( self : Dict , __lowerCamelCase : List[Any] ):
snake_case__ : Optional[int] = '''lower newer'''
snake_case__ : List[str] = '''lower newer'''
return input_text, output_text
def _lowerCAmelCase ( self : int ):
snake_case__ : List[str] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : int = '''lower newer'''
snake_case__ : Tuple = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
snake_case__ : Dict = tokenizer.tokenize(_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokens + [tokenizer.unk_token]
snake_case__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self : Tuple ):
if not self.test_rust_tokenizer:
return
snake_case__ : Union[str, Any] = self.get_tokenizer()
snake_case__ : int = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
snake_case__ : Dict = '''lower newer'''
# Testing tokenization
snake_case__ : Dict = tokenizer.tokenize(_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
snake_case__ : Optional[int] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
snake_case__ : Any = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
snake_case__ : str = self.get_rust_tokenizer(add_prefix_space=_SCREAMING_SNAKE_CASE )
snake_case__ : Any = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Testing the unknown token
snake_case__ : Dict = tokens + [rust_tokenizer.unk_token]
snake_case__ : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
    def _lowerCAmelCase ( self : str , *args , **kwargs ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _lowerCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[Any]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# Simple input
snake_case__ : str = '''This is a simple input'''
snake_case__ : str = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case__ : int = ('''This is a simple input''', '''This is a pair''')
snake_case__ : Any = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Simple input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' , )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(_SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' )
# Pair input
self.assertRaises(
_SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , _SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding='max_length' , )
def _lowerCAmelCase ( self : List[str] ):
snake_case__ : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
snake_case__ : str = '''This is a simple input'''
snake_case__ : Tuple = ['''This is a simple input looooooooong''', '''This is a simple input''']
snake_case__ : Union[str, Any] = ('''This is a simple input''', '''This is a pair''')
snake_case__ : Optional[Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
snake_case__ : Union[str, Any] = tokenizer.pad_token_id
snake_case__ : Dict = tokenizer(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=30 , return_tensors='np' )
snake_case__ : int = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncate=_SCREAMING_SNAKE_CASE , return_tensors='np' )
snake_case__ : Tuple = tokenizer(*_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=60 , return_tensors='np' )
snake_case__ : List[Any] = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncate=_SCREAMING_SNAKE_CASE , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def _lowerCAmelCase ( self : Optional[Any] ):
snake_case__ : List[Any] = '''$$$'''
snake_case__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_SCREAMING_SNAKE_CASE , add_bos_token=_SCREAMING_SNAKE_CASE )
snake_case__ : Tuple = '''This is a simple input'''
snake_case__ : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
snake_case__ : List[Any] = tokenizer.bos_token_id
snake_case__ : List[str] = tokenizer(_SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] = tokenizer(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , _SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case__ : Tuple = tokenizer.decode(out_s.input_ids )
snake_case__ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def _lowerCAmelCase ( self : Optional[int] ):
pass
def _lowerCAmelCase ( self : int ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
snake_case__ : Tuple = [self.get_tokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , add_bos_token=_SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}" ):
snake_case__ : List[Any] = '''Encode this.'''
snake_case__ : List[Any] = '''This one too please.'''
snake_case__ : Optional[int] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
snake_case__ : Dict = tokenizer.encode_plus(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , )
snake_case__ : int = encoded_sequence_dict['''input_ids''']
snake_case__ : Union[str, Any] = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
snake_case__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_SCREAMING_SNAKE_CASE )
]
snake_case__ : str = [x for x in filtered_sequence if x is not None]
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@require_tokenizers
class lowercase_ ( unittest.TestCase ):
def _lowerCAmelCase ( self : List[Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
snake_case__ : Any = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = '''A photo of a cat'''
snake_case__ : Any = tokenizer.encode(
_SCREAMING_SNAKE_CASE , )
self.assertEqual(_SCREAMING_SNAKE_CASE , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
snake_case__ : Dict = AutoTokenizer.from_pretrained('./test_opt' )
snake_case__ : Optional[Any] = tokenizer.encode(
_SCREAMING_SNAKE_CASE , )
self.assertEqual(_SCREAMING_SNAKE_CASE , [2, 250, 1345, 9, 10, 4758] )
def _lowerCAmelCase ( self : str ):
snake_case__ : Tuple = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=_SCREAMING_SNAKE_CASE )
snake_case__ : Dict = '''A photo of a cat'''
snake_case__ : int = tokenizer.encode(
_SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(_SCREAMING_SNAKE_CASE , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def _lowerCAmelCase ( self : int ):
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_SCREAMING_SNAKE_CASE )
snake_case__ : Any = '''bos'''
snake_case__ : Dict = tokenizer.get_vocab()['''bos''']
snake_case__ : Optional[int] = '''A photo of a cat'''
snake_case__ : Any = tokenizer.encode(
_SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(_SCREAMING_SNAKE_CASE , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
snake_case__ : Tuple = tokenizer.encode(
_SCREAMING_SNAKE_CASE , )
self.assertEqual(_SCREAMING_SNAKE_CASE , [31957, 250, 1345, 9, 10, 4758] )
| 270 |
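# Added note: the '\u0120' prefix in the test vocab above is the byte-level BPE
# marker 'Ġ', which GPT-2 tokenizers use to encode a leading space; '\u0120low'
# is therefore the token for ' low'.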
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision):
    '''simple docstring'''
    if not isinstance(precision, int):
        raise TypeError('''Undefined for non-integers''')
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''')
    # set the Decimal working precision before any arithmetic happens
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each Chudnovsky term adds ~14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""") | 30 | 0 |
'''simple docstring'''
def surface_area(edge):
    '''simple docstring'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def volume(edge):
    '''simple docstring'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
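# Added note: for a regular dodecahedron of edge length a these are the standard
# closed forms: surface area A = 3 * sqrt(25 + 10*sqrt(5)) * a**2 and
# volume V = (15 + 7*sqrt(5)) / 4 * a**3, exactly as implemented above.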
| 173 |
from __future__ import annotations
import math
__a = '2020.9.26'
__a = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_ad(x, y, z, scale, distance):
    '''simple docstring'''
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f'''Input values must either be float or int: {list(locals().values())}'''
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x, y, z, axis, angle):
    '''simple docstring'''
    if not isinstance(axis, str):
        raise TypeError('''Axis must be a str''')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'''{list(input_variables.values())}'''
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''')
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 0 |
from functools import reduce
SCREAMING_SNAKE_CASE: str = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n=SCREAMING_SNAKE_CASE) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
    print(f"""{solution() = }""") | 360 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__a = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 30 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 426 |
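# Added note: `_LazyModule` defers the torch-heavy imports above until an attribute
# is first accessed. The same effect can be sketched with a module-level
# __getattr__ (PEP 562); the submodule/class names below just echo this file:
# def __getattr__(name):
#     if name == "XmodModel":
#         from .modeling_xmod import XmodModel
#         return XmodModel
#     raise AttributeError(name)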
def gcd(a, b):
    '''simple docstring'''
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a, m):
    '''simple docstring'''
    if gcd(a, m) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 30 | 0 |
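# Added sanity check (a sketch; uses the functions restored above):
if __name__ == "__main__":
    assert gcd(12, 18) == 6
    assert find_mod_inverse(3, 11) == 4  # 3 * 4 == 12 == 1 (mod 11)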
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : List[Any] = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
__lowercase : Optional[int] = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    # Handle first and last layers
    layer_rename_map = {
        '''word_embeddings.weight''': '''word_embeddings.weight''',
        '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
        '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
        '''weight''': '''ln_f.weight''',
        '''bias''': '''ln_f.bias''',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r'.*layer_(\d*).*' , file )[1] )
    layer_number -= 3
    return F"""h.{layer_number}.""" + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp):
if bloom_config_file == "":
__a : Tuple = BloomConfig()
else:
__a : Optional[int] = BloomConfig.from_json_file(_lowercase )
if shard_model:
__a : Any = os.listdir(_lowercase )
__a : Union[str, Any] = sorted(filter(lambda _SCREAMING_SNAKE_CASE : s.startswith('layer' ) and "model_00" in s , _lowercase ) )
__a : Any = {'''weight_map''': {}, '''metadata''': {}}
__a : List[str] = 0
__a : Any = None
__a : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('Processing file: {}'.format(_lowercase ) )
__a : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
__a : Tuple = file.replace('model_00' , F"""model_0{i}""" )
__a : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='cpu' )
# Rename keys in the transformers names
__a : Dict = list(temp.keys() )
for key in keys:
__a : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
__a : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__a : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__a : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__a : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
__a : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__a : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
__a : List[Any] = BloomConfig()
__a : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
__a : List[str] = total_size
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
__a : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
__a : Any = BloomModel(_lowercase )
__a : Tuple = os.listdir(_lowercase )
__a : Union[str, Any] = sorted(filter(lambda _SCREAMING_SNAKE_CASE : s.startswith('layer' ) and "model_00" in s , _lowercase ) )
__a : Any = None
for i, file in enumerate(_lowercase ):
__a : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
__a : List[Any] = file.replace('model_00' , F"""model_0{i}""" )
__a : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='cpu' )
# Rename keys in the transformers names
__a : str = list(temp.keys() )
for key in keys:
__a : Dict = temp.pop(_lowercase )
if tensors is None:
__a : int = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__a : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                        # We concatenate these weights across TP ranks
__a : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__a : Dict = tensors[key] / pretraining_tp
__a : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
__a : Union[str, Any] = set(other_keys.missing_keys )
else:
__a : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
__a : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__a : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
__a : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(_lowercase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowercase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
__lowercase : List[Any] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 476 |
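# Added sketch of the tensor-parallel merge rule used in the script above
# (toy tensors, not real BLOOM weights): layer norms and biases are averaged
# across TP ranks, while linear weights are concatenated along the parallel dim.
import torch

shard_a, shard_b = torch.ones(2, 4), 3 * torch.ones(2, 4)
averaged = (shard_a + shard_b) / 2                   # WEIGHTS_TO_AVERAGE_ENDSWITH
concatenated = torch.cat([shard_a, shard_b], dim=0)  # row-parallel weights (dim=0)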
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    """simple docstring"""
    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)
    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='''pt''')
    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits
    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8)) | 30 | 0 |
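# Added usage sketch (hypothetical; instantiating the tool downloads the
# CIDAS/clipseg-rd64-refined checkpoint):
# from PIL import Image
# tool = ImageSegmentationTool()
# mask = tool(image=Image.open("photo.png"), label="cat")
# mask.save("cat_mask.png")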
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase__ ( TestCasePlus ):
@slow
@require_torch
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
lowerCamelCase__: Tuple = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""" )
lowerCamelCase__: Union[str, Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
lowerCamelCase__: Optional[int] = bertabert.config.encoder.vocab_size
lowerCamelCase__: Any = tokenizer.sep_token_id
lowerCamelCase__: Optional[int] = tokenizer.cls_token_id
lowerCamelCase__: List[Any] = 128
lowerCamelCase__: int = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""" )
lowerCamelCase__: str = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""" )
lowerCamelCase__: str = train_dataset.select(range(32 ) )
lowerCamelCase__: int = val_dataset.select(range(16 ) )
lowerCamelCase__: Optional[Any] = 4
        def _map_to_encoder_decoder_inputs(batch):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowerCamelCase__: List[str] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=_SCREAMING_SNAKE_CASE , max_length=512 )
lowerCamelCase__: str = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=_SCREAMING_SNAKE_CASE , max_length=128 )
lowerCamelCase__: List[str] = inputs.input_ids
lowerCamelCase__: Optional[Any] = inputs.attention_mask
lowerCamelCase__: Optional[Any] = outputs.input_ids
lowerCamelCase__: Optional[int] = outputs.input_ids.copy()
lowerCamelCase__: Any = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['''labels''']
]
lowerCamelCase__: Dict = outputs.attention_mask
assert all(len(_SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
assert all(len(_SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
return batch
        def _compute_metrics(pred):
lowerCamelCase__: List[str] = pred.label_ids
lowerCamelCase__: Dict = pred.predictions
# all unnecessary tokens are removed
lowerCamelCase__: List[str] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowerCamelCase__: List[str] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
lowerCamelCase__: str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(_SCREAMING_SNAKE_CASE ) )] ) / len(_SCREAMING_SNAKE_CASE )
return {"accuracy": accuracy}
# map train dataset
lowerCamelCase__: Union[str, Any] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , )
train_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
# same for validation dataset
lowerCamelCase__: List[str] = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , )
val_dataset.set_format(
type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , )
lowerCamelCase__: Optional[int] = self.get_auto_remove_tmp_dir()
lowerCamelCase__: List[Any] = SeqaSeqTrainingArguments(
output_dir=_SCREAMING_SNAKE_CASE , per_device_train_batch_size=_SCREAMING_SNAKE_CASE , per_device_eval_batch_size=_SCREAMING_SNAKE_CASE , predict_with_generate=_SCREAMING_SNAKE_CASE , evaluation_strategy="""steps""" , do_train=_SCREAMING_SNAKE_CASE , do_eval=_SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowerCamelCase__: int = SeqaSeqTrainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
| 306 |
import numpy as np
import datasets
_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_KWARGS_DESCRIPTION = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    """simple docstring"""
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    '''X''': datasets.Sequence(datasets.Value('''float''' ,id='''sequence''' ) ,id='''X''' ),
                } ) ,)
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError('''Expected `X` to be a 2D vector''' )
        if len(reference_distribution.shape ) != 2:
            raise ValueError('''Expected `reference_distribution` to be a 2D vector''' )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' )
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution )
        cov = np.cov(reference_distribution.T )
        try:
            inv_covmat = np.linalg.inv(cov )
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov )
        left_term = np.dot(X_minus_mu ,inv_covmat )
        mahal_dist = np.dot(left_term ,X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist} | 30 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 226 |
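# Added note: `iter([1, None])` above makes the mocked file's read() return a
# truthy chunk once and then None, so send_file's read-and-send loop terminates
# after a single chunk; each assert then verifies one step of the protocol.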
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__a = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__a = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__a = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__a = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__a = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = (
    R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. '
    R'Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
)
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
"""simple docstring"""
    def __call__(self, questions, titles=None, texts=None, padding=False, truncation=False, max_length=None, return_tensors=None, return_attention_mask=None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f'''There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.''' )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['''input_ids''']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4) -> List[DPRSpanPrediction]:
        input_ids = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''')
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'''Span is too long: {length} > {max_answer_length}''')
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask'''] | 30 | 0 |
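# Added note: `decode_best_spans` ranks passages by relevance logit, scores each
# candidate span as start_logit + end_logit via `_get_best_spans`, and keeps the
# top-scoring spans while skipping any span overlapping one already chosen.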
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowercase__:
"""simple docstring"""
pass
| 97 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]
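# Minimal illustration (not the real transformers._LazyModule) of how a lazy
# module resolves the names registered in `_import_structure` on first access.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that actually defines it
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        if name not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + self._name_to_module[name], self.__name__)
        return getattr(submodule, name)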
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 30 | 0 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
SCREAMING_SNAKE_CASE = """\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"""
SCREAMING_SNAKE_CASE = """\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"""
SCREAMING_SNAKE_CASE = R"""\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {\'accuracy\': 1.0}\n"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
    def _compute(self, predictions, references):
        """Returns the scores"""
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
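# Usage sketch, mirroring the example already given in _KWARGS_DESCRIPTION above:
#   metric = datasets.load_metric("competition_math")
#   metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])  # {'accuracy': 1.0}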
| 199 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV2VEC2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a [`Wav2Vec2Model`].
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
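    # Hypothetical usage of the property below: with the default conv_stride of
    # (5, 2, 2, 2, 2, 2, 2), Wav2Vec2Config().inputs_to_logits_ratio evaluates to
    # 5 * 2**6 == 320, i.e. one output frame per 320 raw audio samples.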
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1) | 30 | 0 |
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the custom.js file shipped with the docs."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
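# Side-effect-free sketch of the same rewrite (hypothetical helper, handy for
# unit tests): it edits a list of lines instead of the file on disk, and
# assumes the version mapping is the only brace-delimited block.
def bump_stable_version(lines, version):
    out = list(lines)
    stable = next(i for i, line in enumerate(out) if line.startswith("const stableVersion ="))
    out[stable] = f'const stableVersion = "v{version}"\n'
    closing = next(i for i, line in enumerate(out) if line.startswith("}"))
    out[closing - 1] += f'    "v{version}": "v{version}",\n'
    return out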
| 696 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 30 | 0 |
_SCREAMING_SNAKE_CASE : Optional[Any] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
if not isinstance(_lowercase ,_lowercase ):
snake_case = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(_lowercase )
snake_case = ''''''.join(bin(_lowercase )[2:].zfill(8 ) for byte in data )
snake_case = len(_lowercase ) % 6 != 0
if padding_needed:
# The padding that will be added later
snake_case = B'''=''' * ((6 - len(_lowercase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowercase ) % 6)
else:
snake_case = B''''''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] ,2 )]
for index in range(0 ,len(_lowercase ) ,6 ) ).encode()
+ padding
)
def UpperCAmelCase__ (UpperCamelCase_ ):
"""simple docstring"""
if not isinstance(_lowercase ,_lowercase ) and not isinstance(_lowercase ,_lowercase ):
snake_case = (
'''argument should be a bytes-like object or ASCII string, '''
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(_lowercase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowercase ,_lowercase ):
try:
snake_case = encoded_data.decode('''utf-8''' )
except UnicodeDecodeError:
raise ValueError('''base64 encoded data should only contain ASCII characters''' )
snake_case = encoded_data.count('''=''' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowercase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
snake_case = encoded_data[:-padding]
snake_case = ''''''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
snake_case = ''''''.join(
bin(B64_CHARSET.index(_lowercase ) )[2:].zfill(6 ) for char in encoded_data )
snake_case = [
int(binary_stream[index : index + 8] ,2 )
for index in range(0 ,len(_lowercase ) ,8 )
]
return bytes(_lowercase )
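# Round-trip sanity check against the standard library's implementation
# (runs at import time; safe to delete).
import base64 as _stdlib_base64

assert base64_encode(b"Hello, Base64!") == _stdlib_base64.b64encode(b"Hello, Base64!")  # b'SGVsbG8sIEJhc2U2NCE='
assert base64_decode(b"SGVsbG8sIEJhc2U2NCE=") == b"Hello, Base64!"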
if __name__ == "__main__":
import doctest
doctest.testmod()
| 550 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, where the dataset is a list of prompts."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple code completions for each task and gather them across processes."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
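# Hypothetical launch command (flag names live in arguments.HumanEvalArguments):
#   accelerate launch human_eval.py --model_ckpt <checkpoint> --num_tasks 10 --n_samples 200 --batch_size 10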
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main() | 30 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 270 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
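# Hypothetical peek at the ONNX export surface of the classes defined below:
#   onnx_config = ImageGPTOnnxConfig(ImageGPTConfig())
#   onnx_config.inputs  # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'})])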
class ImageGPTConfig(PretrainedConfig):
    """Configuration class to store the configuration of an [`ImageGPTModel`]."""

    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for start of sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate inputs to provide to the ONNX exporter."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs | 30 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main():
    """Close or mark stale issues that have seen no recent activity."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="closed")
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# Add stale comment
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
"are likely to be ignored.")
if __name__ == "__main__":
main()
| 173 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed weight names to transformers weight names."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)
if shard_model:
UpperCAmelCase_ : Any = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = {'''weight_map''': {}, '''metadata''': {}}
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Optional[int] = BloomConfig()
for j, file in enumerate(_lowercase ):
print('''Processing file: {}'''.format(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : Tuple = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Any = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : Dict = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Union[str, Any] = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : Union[str, Any] = temp
else:
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : int = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCAmelCase_ : Tuple = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : List[str] = tensors[key] / pretraining_tp
torch.save(
_lowercase , os.path.join(
_lowercase , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
UpperCAmelCase_ : Union[str, Any] = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
UpperCAmelCase_ : List[str] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(_lowercase ) ).zfill(5 ) )
UpperCAmelCase_ : List[Any] = BloomConfig()
UpperCAmelCase_ : Tuple = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCAmelCase_ : List[str] = total_size
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(_lowercase , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
UpperCAmelCase_ : Optional[Any] = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + '''\n'''
f.write(_lowercase )
else:
UpperCAmelCase_ : Any = BloomModel(_lowercase )
UpperCAmelCase_ : Tuple = os.listdir(_lowercase )
UpperCAmelCase_ : Union[str, Any] = sorted(filter(lambda _lowercase : s.startswith('''layer''' ) and "model_00" in s , _lowercase ) )
UpperCAmelCase_ : Any = None
for i, file in enumerate(_lowercase ):
UpperCAmelCase_ : Optional[Any] = None
for i in range(_lowercase ):
# load all TP files
UpperCAmelCase_ : List[Any] = file.replace('''model_00''' , f'''model_0{i}''' )
UpperCAmelCase_ : Optional[int] = torch.load(os.path.join(_lowercase , _lowercase ) , map_location='''cpu''' )
# Rename keys in the transformers names
UpperCAmelCase_ : str = list(temp.keys() )
for key in keys:
UpperCAmelCase_ : Dict = temp.pop(_lowercase )
if tensors is None:
UpperCAmelCase_ : int = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
UpperCAmelCase_ : Optional[int] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
UpperCAmelCase_ : List[str] = torch.cat([tensors[key], temp[key]] , dim=_lowercase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(_lowercase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
UpperCAmelCase_ : Dict = tensors[key] / pretraining_tp
UpperCAmelCase_ : Tuple = model.load_state_dict(_lowercase , strict=_lowercase )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
UpperCAmelCase_ : Union[str, Any] = set(other_keys.missing_keys )
else:
UpperCAmelCase_ : Dict = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(_lowercase , exist_ok=_lowercase )
UpperCAmelCase_ : str = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCAmelCase_ : Dict = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
UpperCAmelCase_ : Optional[int] = model.to(config.torch_dtype )
torch.save(model.state_dict() , _lowercase )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
) | 30 | 0 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the dataset used to train seq2seq models.

    On the CNN/DailyMail dataset it will extract both the story and the summary
    from each reasonably formatted document in the given folder.
    """

    def __init__(self, path="", prefix="train"):
        """List all the documents to summarize; files are not read in memory
        due to the size of some datasets (like CNN/DailyMail)."""
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Extract the story and summary from a story file.

    Arguments:
        raw_story (str): content of the story file as an utf-8 encoded string.
    """
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u201d", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate if longer,
    otherwise append padding tokens to the right."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def build_mask(sequence, pad_token_id):
    """Builds the attention mask: only positions with value 1 are attended to."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode the story and summary lines, then flatten them into single
    token-id sequences."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]

    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    """Segment embeddings: alternate between 0 and 1 for each sentence,
    switching at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings) | 360 |
def solution():
    """Returns the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
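# Equivalent but cheaper variant: summing pow(i, i, 10**10) keeps every
# intermediate number below 10**10 instead of building huge integers.
def solution_mod():
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)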
if __name__ == "__main__":
print(solution()) | 30 | 0 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '''\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'''
_DESCRIPTION = '''\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'''
_KWARGS_DESCRIPTION = '''\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidates should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'''
_WARNING = '''\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'''
_LICENSE = '''The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    """Evaluation harness for code generation, as used for the HumanEval benchmark."""

    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
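# Sanity check of the unbiased estimator above (runs at import time; safe to
# delete): with n=5 samples of which c=2 pass, pass@1 reduces to c/n = 0.4.
assert np.isclose(estimate_pass_at_k([5], [2], 1)[0], 0.4)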
| 426 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class Image:
    """Image [`Feature`] to read image data from an image file.

    Input can be a `str` path, a `dict` with `path`/`bytes` keys, an
    `np.ndarray`, or a `PIL.Image.Image` object.
    """

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : Dict = {}
UpperCAmelCase_, UpperCAmelCase_ : Union[str, Any] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Dict = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : Optional[int] = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Tuple = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Optional[Any] = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[Any] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : List[Any] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize))
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs | 30 | 0 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
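# Quick illustration (not in the original module): membership in the list
# above decides whether an array can be handed to Pillow without downcasting.
def _demo_valid_dtype():
    assert np.dtype('<u2' ) in _VALID_IMAGE_ARRAY_DTPYES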
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    _type: str = field(default="Image" , init=False , repr=False )
def __call__( self ):
'''simple docstring'''
return self.pa_type
    def encode_example( self , value ):
        '''simple docstring'''
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.' )
        if isinstance(value , list ):
            value = np.array(value )
        if isinstance(value , str ):
            return {"path": value, "bytes": None}
        elif isinstance(value , bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path' )}
        elif value.get('bytes' ) is not None or value.get('path' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes' ), "path": value.get('path' )}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example( self , value , token_per_repo_id=None ):
        '''simple docstring'''
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.' )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split('::' )[-1]
                    try:
                        repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path , 'rb' , use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load() # to avoid "Too many open files" errors
        return image
    def flatten( self ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
    def cast_storage( self , storage ):
        '''simple docstring'''
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('bytes' ) >= 0:
                bytes_array = storage.field('bytes' )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index('path' ) >= 0:
                path_array = storage.field('path' )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
    def embed_storage( self , storage ):
        '''simple docstring'''
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , 'rb' ) as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
def list_image_compression_formats():
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image ):
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image ):
    if hasattr(image , 'filename' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array ):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
        if dtype is not dest_dtype:
            warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(objs ):
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
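# Minimal end-to-end sketch of the feature above (not in the original module;
# assumes Pillow is installed): encode a PIL image into the {"bytes", "path"}
# struct and decode it back.
def _demo_image_feature_roundtrip():
    import PIL.Image
    feature = Image()
    encoded = feature.encode_example(PIL.Image.new('RGB' , (2, 2) ) )
    decoded = feature.decode_example(encoded )
    assert decoded.size == (2, 2)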
| 476 |
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class __a( unittest.TestCase ):
"""simple docstring"""
@slow
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Tuple = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
UpperCAmelCase_ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(_SCREAMING_SNAKE_CASE )
from datasets import load_dataset
UpperCAmelCase_ : Optional[int] = load_dataset('''nielsr/rvlcdip-demo''' )
UpperCAmelCase_ : Optional[Any] = dataset['''train'''][0]['''image'''].convert('''RGB''' )
UpperCAmelCase_ : str = image_processor(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = outputs.logits
UpperCAmelCase_ : Tuple = torch.Size((1, 16) )
self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] ,device=_SCREAMING_SNAKE_CASE ,dtype=torch.float ,)
self.assertTrue(torch.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 30 | 0 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
_lowercase = get_tests_dir('fixtures/dummy-config.json')
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowerCamelCase__: List[Any] = 0
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
lowerCamelCase__: Any = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
lowerCamelCase__: Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
lowerCamelCase__: Dict = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
lowerCamelCase__: str = os.path.join(_SCREAMING_SNAKE_CASE , """fake-roberta""" )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
with open(os.path.join(_SCREAMING_SNAKE_CASE , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
lowerCamelCase__: Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" , _SCREAMING_SNAKE_CASE )
# Wrong model type will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoConfig.register("""model""" , _SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoConfig.register("""bert""" , _SCREAMING_SNAKE_CASE )
# Now that the config is registered, it can be used as any other config with the auto-API
lowerCamelCase__: List[Any] = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase__: List[str] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , """bert-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase__: Dict = AutoConfig.from_pretrained("""bert-base""" )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase__: Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE , revision="""aaaaaa""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
lowerCamelCase__: Dict = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
lowerCamelCase__: Dict = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
lowerCamelCase__: Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
lowerCamelCase__: str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase__: int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
class lowerCamelCase__ ( _a ):
__lowerCamelCase = """new-model"""
try:
AutoConfig.register("""new-model""" , _SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
lowerCamelCase__: List[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
lowerCamelCase__: Dict = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
lowerCamelCase__: Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 306 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
__a = logging.get_logger(__name__)
class CLIPFeatureExtractor( CLIPImageProcessor ):
"""simple docstring"""
def __init__( self ,*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) -> None:
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' ,_SCREAMING_SNAKE_CASE ,)
super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) | 30 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance( x: float , y: float , max_step: int ) -> float:
    a = x
    b = y
    for step in range(max_step ): # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
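# Illustrative sanity checks (not part of the original script): a point far
# outside the set escapes on the first step, while the origin never escapes.
def _demo_get_distance():
    assert get_distance(10.0 , 10.0 , 50 ) == 0.0
    assert get_distance(0.0 , 0.0 , 50 ) == 1.0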
def get_black_and_white_rgb( distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_5_5, 2_5_5, 2_5_5)
def get_color_coded_rgb( distance: float ) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width: int = 8_0_0 , image_height: int = 6_0_0 , figure_center_x: float = -0.6 , figure_center_y: float = 0 , figure_width: float = 3.2 , max_step: int = 5_0 , use_distance_color_coding: bool = True , ) -> Image.Image:
    img = Image.new("RGB" , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
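# Illustrative smoke test (example sizes, not part of the original script):
# a tiny render completes and yields a PIL image of the requested size.
def _demo_get_image():
    tiny = get_image(image_width=16 , image_height=12 , max_step=10 )
    assert tiny.size == (16, 12)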
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 226 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __a( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=400 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=0.9 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,_SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] ,) -> Optional[int]:
UpperCAmelCase_ : int = size if size is not None else {'''shortest_edge''': 30}
UpperCAmelCase_ : List[str] = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = num_channels
UpperCAmelCase_ : Any = min_resolution
UpperCAmelCase_ : Tuple = max_resolution
UpperCAmelCase_ : Optional[int] = do_resize_and_center_crop
UpperCAmelCase_ : Tuple = size
UpperCAmelCase_ : List[str] = crop_pct
UpperCAmelCase_ : List[str] = crop_size
UpperCAmelCase_ : Any = do_normalize
UpperCAmelCase_ : str = image_mean
UpperCAmelCase_ : List[Any] = image_std
def a__ ( self ) -> str:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __a( _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = PoolFormerImageProcessor if is_vision_available() else None
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = PoolFormerImageProcessingTester(self )
@property
def a__ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''size''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''crop_pct''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''do_normalize''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_mean''' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE ,'''image_std''' ) )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size ,{'''height''': 30, '''width''': 30} )
UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size ,{'''height''': 84, '''width''': 84} )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> Dict:
# Initialize image_processing
UpperCAmelCase_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,Image.Image )
# Test not batched input
UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> List[Any]:
# Initialize image_processing
UpperCAmelCase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ : List[str] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,np.ndarray )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
def a__ ( self ) -> Union[str, Any]:
# Initialize image_processing
UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_SCREAMING_SNAKE_CASE ,torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,torch.Tensor )
# Test not batched input
UpperCAmelCase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,)
# Test batched
UpperCAmelCase_ : List[Any] = image_processing(_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) ,) | 30 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset( IterableDataset ):
    """simple docstring"""
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors='''pt''' )
        for task in range(self.n_tasks ):
            for _ in range(self.n_copies ):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria( StoppingCriteria ):
    """simple docstring"""
    def __init__( self , start_length , eof_strings , tokenizer ):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self , input_ids , scores , **kwargs ):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block( string ):
    '''simple docstring'''
    string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code( accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    '''simple docstring'''
    gen_token_dict = defaultdict(list ) # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs['''stopping_criteria'''][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    '''simple docstring'''
    parser = HfArgumentParser(HumanEvalArguments )
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ['''HF_ALLOW_CODE_EVAL'''] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ['''TOKENIZERS_PARALLELISM'''] = '''false'''
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed , device_specific=True )
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
    # Generation settings
    gen_kwargs = {
        '''do_sample''': args.do_sample,
        '''temperature''': args.temperature,
        '''max_new_tokens''': args.max_new_tokens,
        '''top_p''': args.top_p,
        '''top_k''': args.top_k,
        '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset('''openai_humaneval''' )
    code_eval_metric = load_metric('''code_eval''' )
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer , human_eval['''test'''] , n_copies=n_copies , n_tasks=n_tasks )
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized , batch_size=1 )
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
    except ValueError as exception:
        print(
            '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
            ''' flag to enable code evaluation.''' )
        raise exception
    model, human_eval_loader = accelerator.prepare(model , human_eval_loader )
    generations = complete_code(
        accelerator , model , tokenizer , human_eval_loader , n_tasks=n_tasks , batch_size=args.batch_size , **gen_kwargs , )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks ) ):
            test_func = human_eval['''test'''][task]['''test''']
            entry_point = F'''check({human_eval['test'][task]['entry_point']})'''
            references.append('''\n''' + test_func + '''\n''' + entry_point )
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references , predictions=generations , num_workers=args.num_workers )
        print(F'''Results: {pass_at_k}''' )
        # Save results to json file
        with open(args.output_file , '''w''' ) as fp:
            json.dump(pass_at_k , fp )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 97 |
import unittest
import numpy as np
def schur_complement( mat_a , mat_b , mat_c , pseudo_inv = None , ):
    '''simple docstring'''
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            '''Expected the same number of rows for A and B. '''
            f'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            '''Expected the same number of columns for B and C. '''
            f'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )
    return mat_c - mat_b.T @ a_inv @ mat_b
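# Illustrative usage (example matrices, not in the original module): for the
# block matrix M = [[A, B], [B^T, C]], det(M) = det(A) * det(M/A), where M/A is
# the Schur complement computed above.
def _demo_schur_complement():
    a = np.array([[2.0, 0.0], [0.0, 2.0]] )
    b = np.array([[1.0], [0.0]] )
    c = np.array([[3.0]] )
    s = schur_complement(a , b , c ) # C - B^T A^{-1} B = [[2.5]]
    assert np.isclose(s[0, 0] , 2.5 )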
class __a( unittest.TestCase ):
    """simple docstring"""
    def test_schur_complement( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a ,b ,c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x ,det_a * det_s )
    def test_improper_a_b_dimensions( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a ,c ,b )
    def test_improper_b_c_dimensions( self ) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a ,b ,c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main() | 30 | 0 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def snake_case_ ( lowercase__ , lowercase__ ):
UpperCAmelCase__ : int = tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
UpperCAmelCase__ : List[Any] = DatasetInfosDict.from_directory(_lowercase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , ),
] , )
def snake_case_ ( lowercase__ , lowercase__ ):
UpperCAmelCase__ : List[str] = str(_lowercase )
dataset_info.write_to_directory(_lowercase )
UpperCAmelCase__ : int = DatasetInfo.from_directory(_lowercase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(_lowercase , "dataset_info.json" ) )
def snake_case_ ( ):
UpperCAmelCase__ : Tuple = DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 4_2}] , download_checksums={} , download_size=1_3_3_7 , post_processing_size=4_4_2 , dataset_size=1_2_3_4 , size_in_bytes=1_3_3_7 + 4_4_2 + 1_2_3_4 , )
UpperCAmelCase__ : List[str] = dataset_info._to_yaml_dict()
assert sorted(_lowercase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
UpperCAmelCase__ : str = yaml.safe_dump(_lowercase )
UpperCAmelCase__ : List[Any] = yaml.safe_load(_lowercase )
assert dataset_info_yaml_dict == reloaded
def snake_case_ ( ):
UpperCAmelCase__ : Optional[int] = DatasetInfo()
UpperCAmelCase__ : Union[str, Any] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=4_2 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=4_2 ),
"v2": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def snake_case_ ( lowercase__ , lowercase__ ):
UpperCAmelCase__ : Optional[int] = str(_lowercase )
dataset_infos_dict.write_to_directory(_lowercase )
UpperCAmelCase__ : Dict = DatasetInfosDict.from_directory(_lowercase )
    # the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
UpperCAmelCase__ : List[str] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
UpperCAmelCase__ : Optional[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(_lowercase , "README.md" ) )
| 199 |
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode( data ):
    '''simple docstring'''
    if not isinstance(data , bytes ):
        msg = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = ''''''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = B'''=''' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = B''''''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode( encoded_data ):
    '''simple docstring'''
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            '''argument should be a bytes-like object or ASCII string, '''
            f'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode('''utf-8''' )
        except UnicodeDecodeError:
            raise ValueError('''base64 encoded data should only contain ASCII characters''' )
    padding = encoded_data.count('''=''' )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''''''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
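# Illustrative round-trip (example payload, not in the original module): the
# helpers above agree with the standard library's base64 implementation.
def _demo_base64_roundtrip():
    import base64
    payload = b"hello, world"
    assert base64_encode(payload ) == base64.b64encode(payload )
    assert base64_decode(base64_encode(payload ) ) == payload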
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ernie'] = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 696 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 0
@slow
def a__ ( self ) -> Any:
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) ,0 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,20 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size ,12 )
def a__ ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.txt''' ) )
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''bert''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' ,os.path.join(_SCREAMING_SNAKE_CASE ,'''merges.txt''' ) )
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,tokenizer_type='''gpt2''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained('''./''' ,tokenizer_type='''xxx''' )
@require_tokenizers
def a__ ( self ) -> Optional[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase_ : Any = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
else:
self.assertEqual(tokenizer.do_lower_case ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
@require_tokenizers
def a__ ( self ) -> List[Any]:
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' ,):
UpperCAmelCase_ : int = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def a__ ( self ) -> Optional[Any]:
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
UpperCAmelCase_ : int = TOKENIZER_MAPPING.values()
UpperCAmelCase_ : List[Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Tuple:
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ,use_fast=_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) ,_SCREAMING_SNAKE_CASE )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = '''Hello, world. How are you?'''
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' ,do_lower_case=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertEqual('''[UNK]''' ,tokens[0] )
@require_tokenizers
def a__ ( self ) -> Dict:
UpperCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.model_max_length ,512 )
self.assertEqual(tokenizer.vocab_size ,30_000 )
self.assertEqual(tokenizer.unk_token ,'''[UNK]''' )
self.assertEqual(tokenizer.padding_side ,'''right''' )
self.assertEqual(tokenizer.truncation_side ,'''right''' )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,(BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size ,12 )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> str:
# Check we can load the tokenizer config of an online model.
UpperCAmelCase_ : int = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase_ : Optional[int] = config.pop('''_commit_hash''' ,_SCREAMING_SNAKE_CASE )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_SCREAMING_SNAKE_CASE ,{'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase_ : Any = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
self.assertDictEqual(_SCREAMING_SNAKE_CASE ,{} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = get_tokenizer_config(_SCREAMING_SNAKE_CASE )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] ,'''BertTokenizer''' )
def a__ ( self ) -> str:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def a__ ( self ) -> int:
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
# Can register in two steps
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, None) )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] ,(CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
            # We pass through a BertTokenizerFast because there is no slow-to-fast converter for our new
            # tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ : List[str] = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
reloaded_tokenizer = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
reloaded_tokenizer = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ ,'''NewTokenizer''' )
@require_tokenizers
def a__ ( self ) -> Optional[int]:
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = False
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('''custom''' ,_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,slow_tokenizer_class=_SCREAMING_SNAKE_CASE )
AutoTokenizer.register(_SCREAMING_SNAKE_CASE ,fast_tokenizer_class=_SCREAMING_SNAKE_CASE )
# If remote code is not set, the default is to use local
tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def a__ ( self ) -> int:
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizerFast''' )
# Test we can also load the slow version
tokenizer = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' ,trust_remote_code=_SCREAMING_SNAKE_CASE ,use_fast=_SCREAMING_SNAKE_CASE )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ ,'''NewTokenizer''' )
def a__ ( self ) -> Optional[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,'''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def a__ ( self ) -> List[Any]:
with self.assertRaisesRegex(
_SCREAMING_SNAKE_CASE ,R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE ,revision='''aaaaaa''' )
def a__ ( self ) -> Any:
# Make sure we have cached the tokenizer.
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase_ : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count ,0 )
self.assertEqual(counter.head_request_count ,1 )
self.assertEqual(counter.other_request_count ,0 ) | 30 | 0 |
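A minimal sketch of the custom-tokenizer registration pattern the tests above exercise; the two classes are illustrative stand-ins for the `CustomConfig`/`CustomTokenizer` pair they reference:
from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class CustomConfig(PretrainedConfig):
    model_type = "custom"  # key under which the config is registered

class CustomTokenizer(PreTrainedTokenizer):
    pass  # a real tokenizer would implement vocab handling

# Register the pair so AutoTokenizer.from_pretrained can resolve it.
AutoConfig.register("custom", CustomConfig)
AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)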
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'glpn'
    def __init__(
        self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] ,
        hidden_sizes=[32, 64, 160, 256] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] ,
        num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" ,
        hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 ,
        drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=64 , max_depth=10 ,
        head_in_index=-1 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
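# Usage sketch for the configuration above; the defaults encode the published
# GLPN backbone sizes, and this only exercises the restored constructor.
config = GLPNConfig()
assert config.model_type == "glpn"
assert config.hidden_sizes == [32, 64, 160, 256]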
| 550 |
from functools import reduce
__a = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n = __a ):
    '''simple docstring'''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F"""{solution() = }""") | 30 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowercase_ ( unittest.TestCase ):
tokenizer_class = JukeboxTokenizer
metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
def _lowerCAmelCase ( self : List[str] ):
import torch
tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def _lowerCAmelCase ( self : Dict ):
import torch
tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
tokens = tokenizer(**self.metas )['''input_ids''']
# fmt: off
EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
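# For orientation: the call under test amounts to (network access assumed)
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
#   tokens = tokenizer(**self.metas)["input_ids"]
# which yields one id tensor per prior level, matched against EXPECTED_OUTPUT.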
| 270 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision ):
    '''simple docstring'''
    if not isinstance(precision , int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
n = 50
print(F"""The first {n} digits of pi is: {pi(n)}""") | 30 | 0 |
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
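# Minimal usage sketch of the API re-exported above; the dataset name is
# illustrative and the demo stays in comments since this is a package __init__:
#   ds = load_dataset("imdb", split="train[:1%]")
#   print(ds.features)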
| 173 |
from __future__ import annotations
import math
__version__ = '2020.9.26'
__author__ = 'xcodz-dot, cclaus, dhruvmanila'
def convert_to_2d( x , y , z , scale , distance ):
    '''simple docstring'''
    if not all(isinstance(val , (float, int) ) for val in locals().values() ):
        msg = f'''Input values must either be float or int: {list(locals().values() )}'''
        raise TypeError(msg )
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate( x , y , z , axis , angle ):
    '''simple docstring'''
    if not isinstance(axis , str ):
        raise TypeError('''Axis must be a str''' )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val , (float, int) ) for val in input_variables.values() ):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'''{list(input_variables.values() )}'''
        )
        raise TypeError(msg )
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle ) - y * math.sin(angle )
        new_y = y * math.cos(angle ) + x * math.sin(angle )
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + y * math.sin(angle )
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle ) - z * math.sin(angle )
        new_z = z * math.cos(angle ) + x * math.sin(angle )
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''' )
    return new_x, new_y, new_z
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
print(F"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""") | 30 | 0 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'''cola''': 2,
'''mnli''': 3,
'''mrpc''': 2,
'''sst-2''': 2,
'''sts-b''': 1,
'''qqp''': 2,
'''qnli''': 2,
'''rte''': 2,
'''wnli''': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    # Initialise the PyTorch model from the TensorFlow config
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ''
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--xlnet_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained XLNet model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--finetuning_task''',
default=None,
type=str,
help='''Name of a task on which the XLNet TensorFlow model was fine-tuned''',
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
) | 360 |
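The script is driven by the CLI flags above; an equivalent programmatic call would look like this, with purely hypothetical paths:
convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path="/tmp/xlnet/model.ckpt",     # hypothetical path
    xlnet_config_file="/tmp/xlnet/config.json",     # hypothetical path
    pytorch_dump_folder_path="/tmp/xlnet-pytorch",  # hypothetical path
    finetuning_task="sst-2",
)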
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager | 30 | 0 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """simple docstring"""
    def __init__( self , start , end , val , left=None , right=None ):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self ):
        return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class SegmentTree:
    """simple docstring"""
    def __init__( self , collection , function ):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )
    def update( self , i , val ):
        self._update_tree(self.root , i , val )
    def query_range( self , i , j ):
        return self._query_range(self.root , i , j )
    def _build_tree( self , start , end ):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )
    def _update_tree( self , node , i , val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        node.val = self.fn(node.left.val , node.right.val )
    def _query_range( self , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j )
    def traverse( self ):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
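        # Cross-check (a sketch): every range query must equal a naive reduce
        # with the same fn over the backing list.
        from functools import reduce
        data = [2, 1, 5, 3, 4]
        fresh = SegmentTree(data, fn)
        for lo in range(len(data)):
            for hi in range(lo, len(data)):
                assert fresh.query_range(lo, hi) == reduce(fn, data[lo : hi + 1])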
| 426 |
def gcd( a , b ):
    '''simple docstring'''
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse( a , m ):
    '''simple docstring'''
    if gcd(a , m ) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg )
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m | 30 | 0 |
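A quick property check for `find_mod_inverse()` above; on Python 3.8+ the built-in `pow` computes the same value and serves as a reference:
a, m = 7, 26
inv = find_mod_inverse(a, m)
assert (a * inv) % m == 1
assert inv == pow(a, -1, m)  # built-in modular inverse, Python 3.8+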
'''simple docstring'''
def perfect( number ):
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
number = int(input('Enter number: ').strip())
print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
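    # Spot check (a sketch): the only perfect numbers below 10_000 are
    # 6, 28, 496 and 8128, so perfect() should recover exactly those.
    assert [i for i in range(1, 10_000) if perfect(i)] == [6, 28, 496, 8128]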
| 476 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool( PipelineTool ):
    """simple docstring"""
    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements that should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']
    def __init__( self ,*args ,**kwargs ):
        requires_backends(self ,['''vision'''] )
        super().__init__(*args ,**kwargs )
    def encode( self ,image ,label ):
        return self.pre_processor(text=[label] ,images=[image] ,padding='''max_length''' ,return_tensors='''pt''' )
    def forward( self ,inputs ):
        with torch.no_grad():
            logits = self.model(**inputs ).logits
        return logits
    def decode( self ,outputs ):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0  # background
        array[array > 0] = 1   # foreground matching the label
        return Image.fromarray((array * 255).astype(np.uint8 ) ) | 30 | 0 |
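A usage sketch for the tool above; `PipelineTool` instances are callable end to end, and the file names here are hypothetical:
from PIL import Image

tool = ImageSegmentationTool()
image = Image.open("photo.jpg")          # hypothetical input file
mask = tool(image=image, label="a cat")  # PIL mask, white where the label matches
mask.save("cat_mask.png")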