import json
import os
from pathlib import Path

import pytest

from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename


URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    """Minimal stand-in for a `requests` response object."""

    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs) -> list[bytes]:
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs) -> MockResponse:
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
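# Illustrative usage sketch (added; not part of the original test file): the
# non-mocked counterpart of what the tests above verify. The URL below is a
# placeholder; point it at a real file to run this.
if __name__ == "__main__":
    download_config = DownloadConfig(cache_dir="./downloads")
    dl_manager = DownloadManager(dataset_name="dummy", download_config=download_config)
    local_path = dl_manager.download("https://example.com/file1.txt")
    print(local_path)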
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen can be placed at board[row][column]."""
    # check this row
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    # check this column
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # check the upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # check the upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, printing and recording each complete solution."""
    if row >= len(board):
        # the board is appended by reference; `solution` is used below only to
        # count solutions via len(solution)
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for queens and '.' for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with tax added, e.g. price_plus_tax(100, 0.25) == 125.0."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
"""simple docstring"""
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowerCAmelCase__ ( _UpperCamelCase : int ) -> Optional[int]:
"""simple docstring"""
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module, rank):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with `float()`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with `half()`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")

        # Check this does not throw an error
        _ = self.model_fp16.half()

        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        # modules kept in fp32 (here the T5 lm_head-adjacent weights) must stay float32
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
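# Illustrative sketch (added; not part of the original test suite): the minimal
# 4-bit loading path exercised by the tests above. Running it requires a CUDA
# GPU with `bitsandbytes` and `accelerate` installed; the model name and
# generation settings mirror the test constants.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-1b7")
    model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-1b7", load_in_4bit=True, device_map="auto")
    encoded = tokenizer("Hello my name is", return_tensors="pt")
    generated = model.generate(input_ids=encoded["input_ids"].to(0), max_new_tokens=10)
    print(tokenizer.decode(generated[0], skip_special_tokens=True))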
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def lowerCAmelCase__ ( _UpperCamelCase : Iterable[str] , _UpperCamelCase : int ) -> Generator[tuple[str, ...], None, None]:
"""simple docstring"""
snake_case = iter(_UpperCamelCase )
while True:
snake_case = tuple(itertools.islice(_UpperCamelCase , _UpperCamelCase ) )
if not chunk:
return
yield chunk
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> str:
"""simple docstring"""
snake_case = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
snake_case = ''
if len(_UpperCamelCase ) < 2:
return dirty
for i in range(len(_UpperCamelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(_UpperCamelCase ) & 1:
clean += "X"
return clean
def lowerCAmelCase__ ( _UpperCamelCase : str ) -> list[str]:
"""simple docstring"""
snake_case = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
snake_case = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(_UpperCamelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(_UpperCamelCase )
return table
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : str ) -> str:
"""simple docstring"""
snake_case = generate_table(_UpperCamelCase )
snake_case = prepare_input(_UpperCamelCase )
snake_case = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_UpperCamelCase , 2 ):
snake_case ,snake_case = divmod(table.index(_UpperCamelCase ) , 5 )
snake_case ,snake_case = divmod(table.index(_UpperCamelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : str ) -> str:
"""simple docstring"""
snake_case = generate_table(_UpperCamelCase )
snake_case = ''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_UpperCamelCase , 2 ):
snake_case ,snake_case = divmod(table.index(_UpperCamelCase ) , 5 )
snake_case ,snake_case = divmod(table.index(_UpperCamelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
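# Illustrative usage sketch (added; not part of the original file). Note that
# `prepare_input` strips non-letters, upper-cases, splits doubled letters with
# "X" and pads to even length, so decode(encode(msg, key), key) returns that
# normalized form rather than the exact original message.
if __name__ == "__main__":
    key = "monarchy"
    message = "Hide the gold"
    encrypted = encode(message, key)
    print(f"Encrypted: {encrypted}")
    print(f"Decrypted: {decode(encrypted, key)}")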
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : List[str] = '▁'
_lowercase : str = {'vocab_file': 'prophetnet.tokenizer'}
_lowercase : List[str] = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
_lowercase : Union[str, Any] = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
_lowercase : Any = {
'microsoft/xprophetnet-large-wiki100-cased': 5_12,
}
def lowercase__ ( snake_case_ :Union[str, Any] ):
__UpperCAmelCase = collections.OrderedDict()
with open(snake_case_ , '''r''' , encoding='''utf-8''' ) as reader:
__UpperCAmelCase = reader.readlines()
for index, token in enumerate(snake_case_ ):
__UpperCAmelCase = token.rstrip('''\n''' )
__UpperCAmelCase = index
return vocab
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : int = VOCAB_FILES_NAMES
a__ : Dict = PRETRAINED_VOCAB_FILES_MAP
a__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : int = ["input_ids", "attention_mask"]
def __init__( self : str , _lowercase : Any , _lowercase : Dict="[SEP]" , _lowercase : Optional[int]="[SEP]" , _lowercase : Optional[int]="[SEP]" , _lowercase : List[str]="[UNK]" , _lowercase : Union[str, Any]="[PAD]" , _lowercase : str="[CLS]" , _lowercase : Optional[Any]="[MASK]" , _lowercase : Optional[Dict[str, Any]] = None , **_lowercase : Tuple , ):
__UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'''
''' pip install sentencepiece''' )
raise
__UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowercase ) )
__UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__UpperCAmelCase = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4}
for i in range(10 ):
__UpperCAmelCase = F'''[unused{i}]'''
__UpperCAmelCase = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__UpperCAmelCase = 12
__UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_lowercase )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
_lowercase : str = {
# 1536-bit
5: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 2048-bit
14: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AACAA68FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 3072-bit
15: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 4096-bit
16: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'
+ 'FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 6144-bit
17: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'
+ '8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'
+ '302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'
+ 'A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'
+ '49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'
+ 'FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'
+ '180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'
+ '3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'
+ '04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'
+ 'B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'
+ '1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'
+ 'E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'
+ '99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'
+ '04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'
+ '233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'
+ 'D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'
+ 'AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'
+ 'DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'
+ '2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'
+ 'F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'
+ 'BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'
+ 'B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'
+ '387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'
+ '6DCC4024FFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
# 8192-bit
18: {
'prime': int(
'FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'
+ '29024E088A67CC74020BBEA63B139B22514A08798E3404DD'
+ 'EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'
+ 'E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'
+ 'EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'
+ 'C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'
+ '83655D23DCA3AD961C62F356208552BB9ED529077096966D'
+ '670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'
+ 'E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'
+ 'DE2BCBF6955817183995497CEA956AE515D2261898FA0510'
+ '15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'
+ 'ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'
+ 'ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'
+ 'F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'
+ 'BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'
+ '43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'
+ '88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'
+ '2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'
+ '287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'
+ '1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'
+ '93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'
+ '36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'
+ 'F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'
+ '179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'
+ 'DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'
+ '5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'
+ 'D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'
+ '23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'
+ 'CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'
+ '06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'
+ 'DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'
+ '12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'
+ '38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'
+ '741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'
+ '3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'
+ '22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'
+ '4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'
+ '062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'
+ '4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'
+ 'B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'
+ '4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'
+ '9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'
+ '60C980DD98EDD3DFFFFFFFFFFFFFFFFF',
base=16,
),
'generator': 2,
},
}
class DiffieHellman:
    """Class to represent the Diffie-Hellman key exchange protocol."""

    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int, prime: int) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
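# Illustrative usage sketch (added; not part of the original file): two parties
# exchange public keys and derive the same shared secret.
if __name__ == "__main__":
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared
    print(f"Shared key: {alice_shared}")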
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    """Depth-first traversal; each vertex is appended after all of its
    neighbors, so every vertex appears after everything reachable from it."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility property of Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers with the divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
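# Quick sanity check (added; not part of the original file): 1406357289 is the
# worked example from the Project Euler problem 43 statement, so its digit
# tuple must pass the substring-divisibility test.
if __name__ == "__main__":
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))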
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def A_ ( *lowercase , **lowercase ):
pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
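# Illustrative usage sketch (added; not part of the original file). This assumes
# `PipelineTool` instances are callable, with the call running
# encode -> forward -> decode and downloading the NLLB checkpoint on first use.
if __name__ == "__main__":
    translator = TranslationTool()
    print(translator("Hello, how are you?", src_lang="English", tgt_lang="French"))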
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class _lowercase :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PegasusConfig
SCREAMING_SNAKE_CASE__ : Tuple = {}
SCREAMING_SNAKE_CASE__ : Tuple = '''gelu'''
def __init__( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Union[str, Any]=7 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=False , lowerCAmelCase__ :Tuple=99 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Union[str, Any]=4 , lowerCAmelCase__ :List[str]=37 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :str=40 , lowerCAmelCase__ :Dict=2 , lowerCAmelCase__ :List[str]=1 , lowerCAmelCase__ :str=0 , ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = parent
__SCREAMING_SNAKE_CASE : List[str] = batch_size
__SCREAMING_SNAKE_CASE : Dict = seq_length
__SCREAMING_SNAKE_CASE : int = is_training
__SCREAMING_SNAKE_CASE : Any = use_labels
__SCREAMING_SNAKE_CASE : Dict = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = hidden_size
__SCREAMING_SNAKE_CASE : Any = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
__SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size
__SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[Any] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : List[str] = eos_token_id
__SCREAMING_SNAKE_CASE : List[str] = pad_token_id
__SCREAMING_SNAKE_CASE : Optional[int] = bos_token_id
def __magic_name__( self :str ) -> Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__SCREAMING_SNAKE_CASE : str = tf.concat([input_ids, eos_tensor] , axis=1 )
__SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__SCREAMING_SNAKE_CASE : Tuple = prepare_pegasus_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def __magic_name__( self :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = TFPegasusModel(config=lowerCAmelCase__ ).get_decoder()
__SCREAMING_SNAKE_CASE : List[Any] = inputs_dict['''input_ids''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[:1, :]
__SCREAMING_SNAKE_CASE : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
__SCREAMING_SNAKE_CASE : str = inputs_dict['''head_mask''']
__SCREAMING_SNAKE_CASE : Union[str, Any] = 1
# first forward pass
__SCREAMING_SNAKE_CASE : Any = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = outputs.to_tuple()
# create a hypothetical next token and extend next_input_ids with it
__SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append next tokens to input_ids and attention_mask
__SCREAMING_SNAKE_CASE : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
__SCREAMING_SNAKE_CASE : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )[0]
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__SCREAMING_SNAKE_CASE : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx]
__SCREAMING_SNAKE_CASE : Union[str, Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-3 )
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , ):
if attention_mask is None:
__SCREAMING_SNAKE_CASE : str = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__SCREAMING_SNAKE_CASE : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__SCREAMING_SNAKE_CASE : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__SCREAMING_SNAKE_CASE : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ : List[str] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
def __magic_name__( self :Optional[Any] ) -> Any:
__SCREAMING_SNAKE_CASE : List[str] = TFPegasusModelTester(self )
__SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=lowerCAmelCase__ )
def __magic_name__( self :Tuple ) -> List[Any]:
self.config_tester.run_common_tests()
def __magic_name__( self :Tuple ) -> Dict:
__SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
SCREAMING_SNAKE_CASE__ : int = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''google/pegasus-xsum'''
@cached_property
def __magic_name__( self :Tuple ) -> Optional[Any]:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __magic_name__( self :List[str] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __magic_name__( self :Union[str, Any] , **lowerCAmelCase__ :List[Any] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.translate_src_text(**lowerCAmelCase__ )
assert self.expected_text == generated_words
def __magic_name__( self :Any , **lowerCAmelCase__ :int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE : Tuple = self.tokenizer(self.src_text , **lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''tf''' )
__SCREAMING_SNAKE_CASE : str = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE : Any = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase__ )
return generated_words
@slow
def __magic_name__( self :Tuple ) -> int:
self._assert_generated_batch_equal_expected()
| 9 |
from __future__ import annotations
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Dict = 0.00
__SCREAMING_SNAKE_CASE : List[str] = 0
for resistor in resistors:
if resistor <= 0:
__SCREAMING_SNAKE_CASE : Any = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(lowercase__ )
first_sum += 1 / float(lowercase__ )
index += 1
return 1 / first_sum
def _UpperCamelCase ( lowercase__ ):
__SCREAMING_SNAKE_CASE : Tuple = 0.00
__SCREAMING_SNAKE_CASE : int = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__SCREAMING_SNAKE_CASE : Tuple = F'''Resistor at index {index} has a negative value!'''
raise ValueError(lowercase__ )
index += 1
return sum_r
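# Worked example (hand-computed, since both helpers above share the obfuscated
# name _UpperCamelCase and the series version shadows the parallel one):
#   parallel for [2, 4, 4]: 1 / (1/2 + 1/4 + 1/4) = 1.0 ohm
#   series   for [2, 4, 4]: 2 + 4 + 4             = 10.0 ohm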
if __name__ == "__main__":
import doctest
doctest.testmod()
| 9 | 1 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->str:
'''simple docstring'''
return "".join(chr(ord(_lowercase ) - 32 ) if "a" <= char <= "z" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 367 |
"""simple docstring"""
a : Optional[int] = 8.31_4462 # Unit - J mol-1 K-1
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
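# Worked check with assumed values (hand-computed; both helpers above share the
# obfuscated name _SCREAMING_SNAKE_CASE, so only the volume form survives at import):
#   pressure of 1 mol at 300 K in 0.0224 m^3 = 1 * 300 * 8.314462 / 0.0224 ≈ 111354 Pa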
if __name__ == "__main__":
from doctest import testmod
testmod()
| 79 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Any = logging.get_logger(__name__)
snake_case : Dict = {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
),
}
class _snake_case ( snake_case ):
UpperCamelCase__ = 'dpr'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1e-12 , _a=0 , _a="absolute" , _a = 0 , **_a , ):
super().__init__(pad_token_id=_a , **_a )
__magic_name__ : Optional[int] = vocab_size
__magic_name__ : List[str] = hidden_size
__magic_name__ : Optional[Any] = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : Union[str, Any] = intermediate_size
__magic_name__ : Optional[Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[str] = max_position_embeddings
__magic_name__ : int = type_vocab_size
__magic_name__ : Tuple = initializer_range
__magic_name__ : Optional[int] = layer_norm_eps
__magic_name__ : Tuple = projection_dim
__magic_name__ : Dict = position_embedding_type
| 281 |
import math
def lowerCAmelCase_ ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
return math.pow(_snake_case , 2 ) - a
def lowerCAmelCase_ ( _snake_case : float ) -> float:
'''simple docstring'''
return 2 * x
def lowerCAmelCase_ ( _snake_case : float ) -> float:
'''simple docstring'''
__magic_name__ : Optional[int] = 2.0
while start <= a:
__magic_name__ : str = math.pow(_snake_case , 2 )
return start
def lowerCAmelCase_ ( _snake_case : float , _snake_case : int = 9999 , _snake_case : float = 0.00_000_000_000_001 ) -> float:
'''simple docstring'''
if a < 0:
raise ValueError("math domain error" )
__magic_name__ : Optional[int] = get_initial_point(_snake_case )
for _ in range(_snake_case ):
__magic_name__ : int = value
__magic_name__ : str = value - fx(_snake_case , _snake_case ) / fx_derivative(_snake_case )
if abs(prev_value - value ) < tolerance:
return value
return value
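# Newton's update here is x_{n+1} = x_n - (x_n**2 - a) / (2 * x_n). For a = 2 the
# initial point is 4.0 (2.0 is squared once by the search above), and the iterates
# converge quadratically: 4.0 -> 2.25 -> 1.5694 -> 1.4219 -> 1.41423 -> sqrt(2).
# (Illustrative only: after obfuscation all four helpers share the name
# lowerCAmelCase_, so the solver's calls to fx / fx_derivative / get_initial_point
# are dangling.)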
if __name__ == "__main__":
from doctest import testmod
testmod()
| 281 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : List[Any] ) -> str:
'''simple docstring'''
A__ = tempfile.mkdtemp()
A__ = BlipImageProcessor()
A__ = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model" )
A__ = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert" )
A__ = InstructBlipProcessor(snake_case_ , snake_case_ , snake_case_ )
processor.save_pretrained(self.tmpdirname )
def __magic_name__ ( self : List[str] , **snake_case_ : Any ) -> Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).tokenizer
def __magic_name__ ( self : Union[str, Any] , **snake_case_ : Dict ) -> List[str]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).image_processor
def __magic_name__ ( self : Union[str, Any] , **snake_case_ : Tuple ) -> Dict:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **snake_case_ ).qformer_tokenizer
def __magic_name__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
A__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
A__ = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
A__ = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
A__ = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
self.assertIsInstance(processor.qformer_tokenizer , snake_case_ )
def __magic_name__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
A__ = self.prepare_image_inputs()
A__ = image_processor(snake_case_ , return_tensors="np" )
A__ = processor(images=snake_case_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __magic_name__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
A__ = "lower newer"
A__ = processor(text=snake_case_ )
A__ = tokenizer(snake_case_ , return_token_type_ids=snake_case_ )
A__ = qformer_tokenizer(snake_case_ , return_token_type_ids=snake_case_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["qformer_" + key] )
def __magic_name__ ( self : Any ) -> Tuple:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
A__ = "lower newer"
A__ = self.prepare_image_inputs()
A__ = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def __magic_name__ ( self : List[str] ) -> Dict:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(snake_case_ )
A__ = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __magic_name__ ( self : Any ) -> int:
'''simple docstring'''
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = self.get_qformer_tokenizer()
A__ = InstructBlipProcessor(
tokenizer=snake_case_ , image_processor=snake_case_ , qformer_tokenizer=snake_case_ )
A__ = "lower newer"
A__ = self.prepare_image_inputs()
A__ = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(
list(inputs.keys() ) , ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"] , )
| 230 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["MobileViTFeatureExtractor"]
SCREAMING_SNAKE_CASE = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
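# For reference, a minimal sketch of the lazy-import pattern this file follows
# (not the transformers source verbatim): submodule names map to exported symbols,
# and the module object is swapped for a _LazyModule at import time.
#   _import_structure = {"configuration_mobilevit": ["MobileViTConfig"]}
#   _import_structure["modeling_mobilevit"] = ["MobileViTModel"]
#   sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# Note that the obfuscation above renamed every assignment to SCREAMING_SNAKE_CASE
# while the final call still reads _import_structure, so that name is never defined here.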
| 230 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class UpperCamelCase :
def __init__( self, lowerCAmelCase__) -> Union[str, Any]:
snake_case_ = []
self.adlist.append(
{'value': '', 'next_states': [], 'fail_state': 0, 'output': []})
for keyword in keywords:
self.add_keyword(lowerCAmelCase__)
self.set_fail_transitions()
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> int | None:
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def a_ ( self, lowerCAmelCase__) -> None:
snake_case_ = 0
for character in keyword:
snake_case_ = self.find_next_state(lowerCAmelCase__, lowerCAmelCase__)
if next_state is None:
self.adlist.append(
{
'value': character,
'next_states': [],
'fail_state': 0,
'output': [],
})
self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
snake_case_ = len(self.adlist) - 1
else:
snake_case_ = next_state
self.adlist[current_state]["output"].append(lowerCAmelCase__)
def a_ ( self) -> None:
snake_case_ = deque()
for node in self.adlist[0]["next_states"]:
q.append(lowerCAmelCase__)
snake_case_ = 0
while q:
snake_case_ = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(lowerCAmelCase__)
snake_case_ = self.adlist[r]['fail_state']
while (
self.find_next_state(lowerCAmelCase__, self.adlist[child]['value']) is None
and state != 0
):
snake_case_ = self.adlist[state]['fail_state']
snake_case_ = self.find_next_state(
lowerCAmelCase__, self.adlist[child]['value'])
if self.adlist[child]["fail_state"] is None:
snake_case_ = 0
snake_case_ = (
self.adlist[child]['output']
+ self.adlist[self.adlist[child]['fail_state']]['output']
)
def a_ ( self, lowerCAmelCase__) -> dict[str, list[int]]:
snake_case_ = {} # returns a dict mapping each keyword to the list of its occurrences
snake_case_ = 0
for i in range(len(lowerCAmelCase__)):
while (
self.find_next_state(lowerCAmelCase__, string[i]) is None
and current_state != 0
):
snake_case_ = self.adlist[current_state]['fail_state']
snake_case_ = self.find_next_state(lowerCAmelCase__, string[i])
if next_state is None:
snake_case_ = 0
else:
snake_case_ = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
snake_case_ = []
result[key].append(i - len(lowerCAmelCase__) + 1)
return result
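# Hypothetical usage sketch (the class is obfuscated to UpperCamelCase and every
# method to a_, so the constructor's calls to add_keyword / set_fail_transitions
# are dangling; with the original names the automaton behaves like this):
#   automaton = AhoCorasick(["he", "she", "hers"])
#   automaton.search_in("ushers")  # -> {"she": [1], "he": [2], "hers": [2]}
# Each match is reported at its starting index, i.e. i - len(keyword) + 1.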
if __name__ == "__main__":
import doctest
doctest.testmod()
| 69 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Optional[Any] ,__lowercase : Optional[int]=8 ):
'''simple docstring'''
A_ : Optional[int] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A_ : Optional[int] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
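# Worked example for the helper above with the default scale_factor=8: sizes are
# divided by scale_factor**2 = 64 (rounded up), then multiplied back by 8, so
# (768, 770) -> (96, 104): 768 // 64 = 12 -> 96, and 770 needs 13 rows -> 104.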
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
unet=lowercase , scheduler=lowercase , movq=lowercase , )
A_ : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
if latents is None:
A_ : List[str] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
A_ : List[str] = latents.to(lowercase )
A_ : int = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self , lowercase=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
A_ : Dict = torch.device(F'''cuda:{gpu_id}''' )
A_ : List[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
def lowerCAmelCase_ ( self , lowercase=0 ):
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
A_ : Tuple = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A_ : Optional[int] = None
for cpu_offloaded_model in [self.unet, self.movq]:
A_ , A_ : int = cpu_offload_with_hook(lowercase , lowercase , prev_module_hook=lowercase )
# We'll offload the last model manually.
A_ : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self , lowercase , lowercase , lowercase , lowercase = 5_1_2 , lowercase = 5_1_2 , lowercase = 1_0_0 , lowercase = 4.0 , lowercase = 1 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , ):
"""simple docstring"""
A_ : Dict = self._execution_device
A_ : Dict = guidance_scale > 1.0
if isinstance(lowercase , lowercase ):
A_ : Dict = torch.cat(lowercase , dim=0 )
if isinstance(lowercase , lowercase ):
A_ : str = torch.cat(lowercase , dim=0 )
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = torch.cat(lowercase , dim=0 )
A_ : str = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
A_ : str = image_embeds.repeat_interleave(lowercase , dim=0 )
A_ : Union[str, Any] = negative_image_embeds.repeat_interleave(lowercase , dim=0 )
A_ : Optional[Any] = hint.repeat_interleave(lowercase , dim=0 )
A_ : Tuple = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase )
A_ : Optional[Any] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase )
self.scheduler.set_timesteps(lowercase , device=lowercase )
A_ : Any = self.scheduler.timesteps
A_ : str = self.movq.config.latent_channels
A_ , A_ : List[Any] = downscale_height_and_width(lowercase , lowercase , self.movq_scale_factor )
# create initial latent
A_ : int = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
A_ : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : Any = {'image_embeds': image_embeds, 'hint': hint}
A_ : Dict = self.unet(
sample=lowercase , timestep=lowercase , encoder_hidden_states=lowercase , added_cond_kwargs=lowercase , return_dict=lowercase , )[0]
if do_classifier_free_guidance:
A_ , A_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
A_ , A_ : List[str] = noise_pred.chunk(2 )
A_ , A_ : List[str] = variance_pred.chunk(2 )
A_ : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A_ , A_ : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A_ : Tuple = self.scheduler.step(
lowercase , lowercase , lowercase , generator=lowercase , )[0]
# post-processing
A_ : Any = self.movq.decode(lowercase , force_not_quantize=lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
A_ : Optional[Any] = image * 0.5 + 0.5
A_ : int = image.clamp(0 , 1 )
A_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A_ : Optional[int] = self.numpy_to_pil(lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase )
| 140 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowerCAmelCase_ : Optional[int] = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
'''simple docstring'''
UpperCAmelCase = True
while ask_again:
UpperCAmelCase = input(lowerCAmelCase )
try:
if default is not None and len(lowerCAmelCase ) == 0:
return default
return convert_value(lowerCAmelCase ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(lowerCAmelCase )
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase=[] , lowerCAmelCase=None , lowerCAmelCase=0 ):
'''simple docstring'''
UpperCAmelCase = BulletMenu(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = menu.run(default_choice=lowerCAmelCase )
return convert_value(lowerCAmelCase ) if convert_value is not None else result
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = int(lowerCAmelCase )
return ComputeEnvironment(["""LOCAL_MACHINE""", """AMAZON_SAGEMAKER"""][value] )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = int(lowerCAmelCase )
return DistributedType(["""NO""", """MULTI_CPU""", """MULTI_XPU""", """MULTI_GPU""", """MULTI_NPU""", """TPU"""][value] )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = int(lowerCAmelCase )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = int(lowerCAmelCase )
return PrecisionType(["""no""", """fp16""", """bf16""", """fp8"""][value] )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = int(lowerCAmelCase )
return SageMakerDistributedType(["""NO""", """DATA_PARALLEL""", """MODEL_PARALLEL"""][value] )
def _lowerCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
return {"yes": True, "no": False}[value.lower()]
class UpperCamelCase_ ( argparse.RawDescriptionHelpFormatter ):
def UpperCamelCase_ ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = super()._format_usage(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase = usage.replace("""<command> [<args>] """ , """""" )
return usage
| 248 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=a_ )
class UpperCamelCase_ ( a_ ):
_A : str = field(default='image-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_A : ClassVar[Features] = Features({'image': Image()} )
_A : ClassVar[Features] = Features({'labels': ClassLabel} )
_A : str = "image"
_A : str = "labels"
def UpperCamelCase_ ( self , snake_case__ ) -> List[str]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , snake_case__ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
UpperCAmelCase = copy.deepcopy(self )
UpperCAmelCase = self.label_schema.copy()
UpperCAmelCase = features[self.label_column]
UpperCAmelCase = label_schema
return task_template
@property
def UpperCamelCase_ ( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.image_column: "image",
self.label_column: "labels",
}
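# Hypothetical usage sketch (the class above is obfuscated to UpperCamelCase_;
# upstream it is the datasets ImageClassification task template):
#   template = ImageClassification(image_column="img", label_column="label")
#   dataset = dataset.prepare_for_task(template)  # renames columns per column_mapping
# The align-with-features method above swaps the placeholder ClassLabel for the
# dataset's own label feature before training.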
| 248 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
A : Optional[int] = CLIPTokenizer
A : Tuple = CLIPTokenizerFast
A : Any = True
A : Dict = {}
A : List[Any] = False
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE : int = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE : List[Any] = dict(zip(A, range(len(A ) ) ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE : str = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file, 'w', encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file, 'w', encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, **A ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **A )
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = 'lower newer'
SCREAMING_SNAKE_CASE : Union[str, Any] = 'lower newer'
return input_text, output_text
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
SCREAMING_SNAKE_CASE : int = 'lower newer'
SCREAMING_SNAKE_CASE : List[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(A )
self.assertListEqual(A, A )
SCREAMING_SNAKE_CASE : int = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE : int = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ), A )
@require_ftfy
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A, **A )
SCREAMING_SNAKE_CASE : int = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE : Dict = tokenizer_s.tokenize(A )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.tokenize(A )
self.assertListEqual(A, A )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE : Union[str, Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE : List[str] = tokenizer_s.tokenize(A )
SCREAMING_SNAKE_CASE : Any = tokenizer_r.tokenize(A )
self.assertListEqual(A, A )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
'\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_s.tokenize(A )
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.tokenize(A )
self.assertListEqual(A, A )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_s.tokenize(A )
SCREAMING_SNAKE_CASE : int = tokenizer_r.tokenize(A )
self.assertListEqual(A, A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
SCREAMING_SNAKE_CASE : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE : Tuple = F"{text_of_1_token} {text_of_1_token}"
SCREAMING_SNAKE_CASE : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, )
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
SCREAMING_SNAKE_CASE : Union[str, Any] = F" {text}"
SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, )
SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(A ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def UpperCamelCase_ ( self ):
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def UpperCamelCase_ ( self ):
'''simple docstring'''
pass
| 251 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCamelCase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
UpperCamelCase_ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
UpperCamelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowercase__( __UpperCamelCase: str ):
"""simple docstring"""
with open(__UpperCamelCase ,'rb' ) as f:
im = Image.open(f )
return im.convert('RGB' )
@dataclass
class _a :
'''simple docstring'''
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
A : Optional[str] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''A folder containing the training data.'''} )
A : Optional[str] = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''A folder containing the validation data.'''} )
A : Optional[float] = field(
default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
A : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
A : Optional[int] = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'You must specify either a dataset name from the hub or a train and/or validation directory.' )
@dataclass
class _a :
'''simple docstring'''
A : str = field(
default='''google/vit-base-patch16-224-in21k''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(SCREAMING_SNAKE_CASE )} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
A : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
A : str = field(default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Name or path of preprocessor config.'''} )
A : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE , metadata={'''help''': '''Enable loading a pretrained model whose head dimensions are different.'''} , )
def lowercase__( __UpperCamelCase: Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = torch.stack([example['pixel_values'] for example in examples] )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([example['labels'] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' ,__UpperCamelCase ,__UpperCamelCase )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' ,datefmt='%m/%d/%Y %H:%M:%S' ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : str = training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
SCREAMING_SNAKE_CASE : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
SCREAMING_SNAKE_CASE : Union[str, Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
SCREAMING_SNAKE_CASE : Any = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir ,task='image-classification' ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
if data_args.train_dir is not None:
SCREAMING_SNAKE_CASE : Tuple = os.path.join(data_args.train_dir ,'**' )
if data_args.validation_dir is not None:
SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(data_args.validation_dir ,'**' )
SCREAMING_SNAKE_CASE : str = load_dataset(
'imagefolder' ,data_files=__UpperCamelCase ,cache_dir=model_args.cache_dir ,task='image-classification' ,)
# If we don't have a validation split, split off a percentage of train as validation.
SCREAMING_SNAKE_CASE : Tuple = None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,__UpperCamelCase ) and data_args.train_val_split > 0.0:
SCREAMING_SNAKE_CASE : int = dataset['train'].train_test_split(data_args.train_val_split )
SCREAMING_SNAKE_CASE : Optional[int] = split['train']
SCREAMING_SNAKE_CASE : int = split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
SCREAMING_SNAKE_CASE : int = dataset['train'].features['labels'].names
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = {}, {}
for i, label in enumerate(__UpperCamelCase ):
SCREAMING_SNAKE_CASE : List[Any] = str(__UpperCamelCase )
SCREAMING_SNAKE_CASE : int = label
# Load the accuracy metric from the datasets package
SCREAMING_SNAKE_CASE : Any = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCamelCase: Dict ):
return metric.compute(predictions=np.argmax(p.predictions ,axis=1 ) ,references=p.label_ids )
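# p.predictions has shape (num_examples, num_labels); argmax over axis 1 turns
# the logits into predicted class ids before they are compared to p.label_ids.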
SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(__UpperCamelCase ) ,labelaid=__UpperCamelCase ,idalabel=__UpperCamelCase ,finetuning_task='image-classification' ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool('.ckpt' in model_args.model_name_or_path ) ,config=__UpperCamelCase ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
SCREAMING_SNAKE_CASE : List[str] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE : Optional[Any] = image_processor.size['shortest_edge']
else:
SCREAMING_SNAKE_CASE : List[Any] = (image_processor.size['height'], image_processor.size['width'])
SCREAMING_SNAKE_CASE : Dict = Normalize(mean=image_processor.image_mean ,std=image_processor.image_std )
SCREAMING_SNAKE_CASE : Dict = Compose(
[
RandomResizedCrop(__UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
SCREAMING_SNAKE_CASE : List[Any] = Compose(
[
Resize(__UpperCamelCase ),
CenterCrop(__UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(__UpperCamelCase: List[Any] ):
SCREAMING_SNAKE_CASE : Optional[int] = [
_train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(__UpperCamelCase: Dict ):
SCREAMING_SNAKE_CASE : List[str] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
    dataset["train"] = (
        dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
    )
# Set the training transforms
dataset["train"].set_transform(train_transforms)
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
    dataset["validation"] = (
        dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
    )
# Set the validation transforms
dataset["validation"].set_transform(val_transforms)
# Initialize our trainer
trainer = Trainer(
    model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )  # collate_fn is defined earlier in this script
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
    checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
    checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('train' ,train_result.metrics )
trainer.save_metrics('train' ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
kwargs = {
    "finetuned_from": model_args.model_name_or_path,
    "tasks": "image-classification",
    "dataset": data_args.dataset_name,
    "tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
    trainer.push_to_hub(**kwargs)
else:
    trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 251 | 1 |
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
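# The kernel below samples an unnormalized 2-D Gaussian over the window:
#   g(x, y) = 1 / (2 * pi * sigma) * exp(-(x**2 + y**2) / (2 * sigma**2))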
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
# read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
waitKey()
| 350 |
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS
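# By linearity of expectation, the expected number of distinct colours among n drawn
# balls is NUM_COLOURS * (1 - C(NUM_BALLS - BALLS_PER_COLOUR, n) / C(NUM_BALLS, n)).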
def solution(taken: int = 20) -> str:
    total = math.comb(NUM_BALLS, taken)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, taken)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
| 342 | 0 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    def construct(self):
lowerCamelCase__ : Tuple =Rectangle(height=0.5 , width=0.5 )
lowerCamelCase__ : Optional[Any] =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ : Dict =[mem.copy() for i in range(6 )]
lowerCamelCase__ : List[str] =[mem.copy() for i in range(6 )]
lowerCamelCase__ : Optional[int] =VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : Union[str, Any] =VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : List[Any] =VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : Optional[Any] =Text('CPU' , font_size=24 )
lowerCamelCase__ : Union[str, Any] =Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
lowerCamelCase__ : Tuple =[mem.copy() for i in range(1 )]
lowerCamelCase__ : Union[str, Any] =VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : Dict =Text('GPU' , font_size=24 )
lowerCamelCase__ : List[Any] =Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.align_to(lowerCamelCase_ , lowerCamelCase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase_ )
lowerCamelCase__ : Any =[mem.copy() for i in range(6 )]
lowerCamelCase__ : List[str] =VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
lowerCamelCase__ : str =Text('Model' , font_size=24 )
lowerCamelCase__ : List[str] =Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , )
lowerCamelCase__ : str =MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
lowerCamelCase__ : Optional[int] =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ : Tuple =MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=2.5 ) , Write(lowerCamelCase_ ) , Write(lowerCamelCase_ ) )
self.add(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] =[]
lowerCamelCase__ : Any =[]
lowerCamelCase__ : Optional[Any] =[]
for i, rect in enumerate(lowerCamelCase_ ):
lowerCamelCase__ : str =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase_ )
cpu_target.generate_target()
lowerCamelCase__ : Dict =0.46 / 4
lowerCamelCase__ : Tuple =0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase_ , buff=0.0 )
cpu_targs.append(lowerCamelCase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase_ ) )
second_animations.append(MoveToTarget(lowerCamelCase_ , run_time=1.5 ) )
self.play(*lowerCamelCase_ )
self.play(*lowerCamelCase_ )
self.wait()
| 126 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCAmelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCAmelCase_ ( ) ->Tuple:
lowerCamelCase__ : Dict =_ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowerCamelCase__ : int =get_sagemaker_input()
else:
lowerCamelCase__ : List[str] =get_cluster_input()
return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file", default=None, help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
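    # Typical invocation goes through the `accelerate` CLI, which registers this module
    # as the `config` subcommand:
    #   accelerate config [--config_file /path/to/default_config.yaml]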
| 126 | 1 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """Check the equality of ALL elements in an iterable."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Run the core search process to find the problem solution."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4) -> int:
    """Return the first member of the first group of n consecutive integers that each have n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
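    # With the default n=4 this prints 134043, the first of four consecutive integers
    # that each have four distinct prime factors (Project Euler 47).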
| 154 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
        GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")
        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)
        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])
        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()
        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(
            list(inputs.keys()), ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"], )
| 154 | 1 |
"""simple docstring"""
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py", FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}", )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 242 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
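# Despite the name, with both inputs L2-normalized the matmul above yields cosine
# *similarities* (larger = more similar); the per-concept thresholds below assume this.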
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)
        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
@torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()
        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01
            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)
            result.append(result_img)
        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]
        return images, has_nsfw_concepts
@torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
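# Minimal usage sketch (illustrative; in practice the Stable Diffusion pipeline builds the
# checker from pretrained weights and passes CLIP-preprocessed images):
#   checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
#   images, has_nsfw = checker(clip_input=clip_pixel_values, images=numpy_images)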
| 68 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
"""simple docstring"""
if "emb" in name:
lowerCAmelCase_ : List[str] = name.replace('emb' , 'model.decoder.embed_tokens' )
if "transformer" in name:
lowerCAmelCase_ : str = name.replace('transformer' , 'model.decoder' )
if "cross_attention" in name:
lowerCAmelCase_ : Tuple = name.replace('cross_attention' , 'encoder_attn' )
if "linear1" in name:
lowerCAmelCase_ : Dict = name.replace('linear1' , 'fc1' )
if "linear2" in name:
lowerCAmelCase_ : Optional[int] = name.replace('linear2' , 'fc2' )
if "norm1" in name:
lowerCAmelCase_ : Optional[int] = name.replace('norm1' , 'self_attn_layer_norm' )
if "norm_cross" in name:
lowerCAmelCase_ : List[str] = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
if "norm2" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace('norm2' , 'final_layer_norm' )
if "out_norm" in name:
lowerCAmelCase_ : Optional[Any] = name.replace('out_norm' , 'model.decoder.layer_norm' )
if "linears" in name:
lowerCAmelCase_ : str = name.replace('linears' , 'lm_heads' )
if "condition_provider.conditioners.description.output_proj" in name:
lowerCAmelCase_ : List[Any] = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size, ffn_dim=hidden_size * 4, num_hidden_layers=num_hidden_layers, num_attention_heads=num_attention_heads, )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)
    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)
    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)
    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)
    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
lowercase__ : Tuple = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
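    # Example invocation (script name and output path are illustrative):
    #   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small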
| 289 |
"""simple docstring"""
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Prints a maximum number of activities that can be performed by a single
    person, one at a time. Assumes the activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase__ : List[str] = [1, 3, 0, 5, 8, 5]
lowercase__ : Dict = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 289 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 191 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
lowerCamelCase_ : Optional[int] = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowerCamelCase_ : Optional[Any] = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowerCamelCase_ : Optional[int] = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                } ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"], )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 81 | 0 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'''upsamples.{i}.1.weight_g''']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'''upsamples.{i}.1.weight_v''']
        hf_model.upsampler[i].bias.data = checkpoint[f'''upsamples.{i}.1.bias''']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint['model']['generator'], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('Pushing to the hub...')
        model.push_to_hub(repo_id)
if __name__ == "__main__":
A__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
A__ : Optional[int] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/electra-small-generator''': 5_1_2,
'''google/electra-base-generator''': 5_1_2,
'''google/electra-large-generator''': 5_1_2,
'''google/electra-small-discriminator''': 5_1_2,
'''google/electra-base-discriminator''': 5_1_2,
'''google/electra-large-discriminator''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 0 | 1 |
import math
from numpy import inf
from scipy.integrate import quad
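# The gamma function as an improper integral, evaluated numerically below with
# scipy.integrate.quad: Gamma(z) = integral from 0 to inf of x**(z - 1) * e**(-x) dx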
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]
def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 59 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = 'Hello, World!'
SAMPLE_LANGUAGE = 'en_XX'
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head):
    data_dir = Path("data_bin")
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path).parent), checkpoint_file=Path(xmod_checkpoint_path).name, _name="xmod_base", arch="xmod_base", task="multilingual_masked_lm", data_name_or_path=str(data_dir), bpe="sentencepiece", sentencepiece_model=str(Path(xmod_checkpoint_path).parent / "sentencepiece.bpe.model"), src_dict=str(data_dir / "dict.txt"), )
    xmod.eval()  # disable dropout
    print(xmod)
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings, hidden_size=xmod.cfg.model.encoder_embed_dim, num_hidden_layers=xmod.cfg.model.encoder_layers, num_attention_heads=xmod.cfg.model.encoder_attention_heads, intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-5, pre_norm=xmod.cfg.model.encoder_normalize_before, adapter_reduction_factor=getattr(xmod.cfg.model, "bottleneck", 2), adapter_layer_norm=xmod.cfg.model.adapter_layer_norm, adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm, ln_before_adapter=xmod.cfg.model.ln_before_adapter, languages=xmod.cfg.model.languages, )
    if classification_head:
        config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our X-MOD config:", config)
    model = XmodForSequenceClassification(config) if classification_head else XmodForMaskedLM(config)
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__UpperCamelCase : int = model.roberta.encoder.layer[i]
__UpperCamelCase : Any = xmod_sent_encoder.layers[i]
# self attention
__UpperCamelCase : List[str] = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
__UpperCamelCase : Dict = xmod_layer.self_attn.q_proj.weight
__UpperCamelCase : Optional[Any] = xmod_layer.self_attn.q_proj.bias
__UpperCamelCase : Any = xmod_layer.self_attn.k_proj.weight
__UpperCamelCase : Tuple = xmod_layer.self_attn.k_proj.bias
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.v_proj.weight
__UpperCamelCase : Any = xmod_layer.self_attn.v_proj.bias
# self-attention output
__UpperCamelCase : Optional[int] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
__UpperCamelCase : Union[str, Any] = xmod_layer.self_attn.out_proj.weight
__UpperCamelCase : str = xmod_layer.self_attn.out_proj.bias
__UpperCamelCase : Dict = xmod_layer.self_attn_layer_norm.weight
__UpperCamelCase : Any = xmod_layer.self_attn_layer_norm.bias
# intermediate
__UpperCamelCase : Dict = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
__UpperCamelCase : List[Any] = xmod_layer.fca.weight
__UpperCamelCase : Optional[int] = xmod_layer.fca.bias
# output
__UpperCamelCase : List[Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
__UpperCamelCase : Tuple = xmod_layer.fca.weight
__UpperCamelCase : int = xmod_layer.fca.bias
__UpperCamelCase : Dict = xmod_layer.final_layer_norm.weight
__UpperCamelCase : int = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
__UpperCamelCase : Any = xmod_layer.adapter_layer_norm.weight
__UpperCamelCase : int = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
__UpperCamelCase : Any = bert_output.adapter_modules[lang_code]
__UpperCamelCase : Dict = xmod_layer.adapter_modules[lang_code]
__UpperCamelCase : int = from_adapter.fca.weight
__UpperCamelCase : Dict = from_adapter.fca.bias
__UpperCamelCase : List[Any] = from_adapter.fca.weight
__UpperCamelCase : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
__UpperCamelCase : Tuple = xmod_sent_encoder.layer_norm.weight
__UpperCamelCase : List[Any] = xmod_sent_encoder.layer_norm.bias
if classification_head:
__UpperCamelCase : Optional[Any] = xmod.model.classification_heads["mnli"].dense.weight
__UpperCamelCase : Any = xmod.model.classification_heads["mnli"].dense.bias
__UpperCamelCase : Tuple = xmod.model.classification_heads["mnli"].out_proj.weight
__UpperCamelCase : List[Any] = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
__UpperCamelCase : Any = xmod.model.encoder.lm_head.dense.weight
__UpperCamelCase : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.layer_norm.weight
__UpperCamelCase : List[Any] = xmod.model.encoder.lm_head.layer_norm.bias
__UpperCamelCase : Tuple = xmod.model.encoder.lm_head.weight
__UpperCamelCase : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE)
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids))
    else:
        their_output = xmod.model(input_ids, lang_id=[SAMPLE_LANGUAGE])[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(F"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")
    Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowerCAmelCase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
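# Example invocation (the script name and paths are illustrative, not taken
# from the repo; the flags match the argparse definition above):
#
#   python convert_xmod_original_pytorch_checkpoint_to_pytorch.py \
#       --xmod_checkpoint_path /path/to/xmod.base.81.1M/model.pt \
#       --pytorch_dump_folder_path ./xmod-base \
#       --classification_head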
| 298 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self : str , _lowercase : int , _lowercase : List[str]=7 , _lowercase : List[str]=3 , _lowercase : Optional[int]=30 , _lowercase : Optional[int]=4_00 , _lowercase : Optional[Any]=True , _lowercase : Dict=None , _lowercase : Any=True , _lowercase : str=1 / 2_55 , _lowercase : str=True , _lowercase : Union[str, Any]=[0.5, 0.5, 0.5] , _lowercase : Optional[int]=[0.5, 0.5, 0.5] , _lowercase : str=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__UpperCAmelCase = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = min_resolution
__UpperCAmelCase = max_resolution
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = do_rescale
__UpperCAmelCase = rescale_factor
__UpperCAmelCase = do_normalize
__UpperCAmelCase = image_mean
__UpperCAmelCase = image_std
__UpperCAmelCase = do_pad
def a ( self : Union[str, Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def a ( self : Dict , _lowercase : Tuple , _lowercase : List[Any]=False ):
if not batched:
__UpperCAmelCase = image_inputs[0]
if isinstance(_lowercase , Image.Image ):
__UpperCAmelCase , __UpperCAmelCase = image.size
else:
__UpperCAmelCase , __UpperCAmelCase = image.shape[1], image.shape[2]
if w < h:
__UpperCAmelCase = int(self.size['''shortest_edge'''] * h / w )
__UpperCAmelCase = self.size['''shortest_edge''']
elif w > h:
__UpperCAmelCase = self.size['''shortest_edge''']
__UpperCAmelCase = int(self.size['''shortest_edge'''] * w / h )
else:
__UpperCAmelCase = self.size['''shortest_edge''']
__UpperCAmelCase = self.size['''shortest_edge''']
else:
__UpperCAmelCase = []
for image in image_inputs:
__UpperCAmelCase , __UpperCAmelCase = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__UpperCAmelCase = max(_lowercase , key=lambda _lowercase : item[0] )[0]
__UpperCAmelCase = max(_lowercase , key=lambda _lowercase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
a__ : Optional[int] = DetrImageProcessor if is_vision_available() else None
def a ( self : int ):
__UpperCAmelCase = DetrImageProcessingTester(self )
@property
def a ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def a ( self : str ):
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , '''image_mean''' ) )
self.assertTrue(hasattr(_lowercase , '''image_std''' ) )
self.assertTrue(hasattr(_lowercase , '''do_normalize''' ) )
self.assertTrue(hasattr(_lowercase , '''do_rescale''' ) )
self.assertTrue(hasattr(_lowercase , '''rescale_factor''' ) )
self.assertTrue(hasattr(_lowercase , '''do_resize''' ) )
self.assertTrue(hasattr(_lowercase , '''size''' ) )
self.assertTrue(hasattr(_lowercase , '''do_pad''' ) )
def a ( self : Tuple ):
__UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , _lowercase )
__UpperCAmelCase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowercase )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , _lowercase )
def a ( self : List[str] ):
pass
def a ( self : Tuple ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
__UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a ( self : Optional[Any] ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a ( self : int ):
# Initialize image_processing
__UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
__UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
__UpperCAmelCase , __UpperCAmelCase = self.image_processor_tester.get_expected_values(_lowercase , batched=_lowercase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a ( self : int ):
# prepare image and target
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
__UpperCAmelCase = json.loads(f.read() )
__UpperCAmelCase = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
__UpperCAmelCase = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
__UpperCAmelCase = image_processing(images=_lowercase , annotations=_lowercase , return_tensors='''pt''' )
# verify pixel values
__UpperCAmelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase )
__UpperCAmelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4 ) )
# verify area
__UpperCAmelCase = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase ) )
# verify boxes
__UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase )
__UpperCAmelCase = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3 ) )
# verify image_id
__UpperCAmelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase ) )
# verify is_crowd
__UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase ) )
# verify class_labels
__UpperCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase ) )
# verify orig_size
__UpperCAmelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase ) )
# verify size
__UpperCAmelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase ) )
@slow
def a ( self : int ):
# prepare image, target and masks_path
__UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
__UpperCAmelCase = json.loads(f.read() )
__UpperCAmelCase = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
__UpperCAmelCase = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
__UpperCAmelCase = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
__UpperCAmelCase = image_processing(images=_lowercase , annotations=_lowercase , masks_path=_lowercase , return_tensors='''pt''' )
# verify pixel values
__UpperCAmelCase = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , _lowercase )
__UpperCAmelCase = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _lowercase , atol=1E-4 ) )
# verify area
__UpperCAmelCase = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _lowercase ) )
# verify boxes
__UpperCAmelCase = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _lowercase )
__UpperCAmelCase = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _lowercase , atol=1E-3 ) )
# verify image_id
__UpperCAmelCase = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _lowercase ) )
# verify is_crowd
__UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _lowercase ) )
# verify class_labels
__UpperCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _lowercase ) )
# verify masks
__UpperCAmelCase = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _lowercase )
# verify orig_size
__UpperCAmelCase = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _lowercase ) )
# verify size
__UpperCAmelCase = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _lowercase ) )
| 86 |
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_lowercase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(
_lowerCAmelCase , r"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class _UpperCAmelCase ( _lowerCAmelCase ):
def a ( self : List[Any] , _lowercase : GenericTensor ):
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def a ( self : List[str] , _lowercase : GenericTensor ):
__UpperCAmelCase = self.get_masked_index(_lowercase )
__UpperCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'''No mask_token ({self.tokenizer.mask_token}) found on the input''' , )
def a ( self : Optional[int] , _lowercase : GenericTensor ):
if isinstance(_lowercase , _lowercase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(_lowercase )
def a ( self : List[str] , _lowercase : Optional[int] , _lowercase : Tuple=None , **_lowercase : Tuple ):
if return_tensors is None:
__UpperCAmelCase = self.framework
__UpperCAmelCase = self.tokenizer(_lowercase , return_tensors=_lowercase )
self.ensure_exactly_one_mask_token(_lowercase )
return model_inputs
def a ( self : Optional[int] , _lowercase : Tuple ):
__UpperCAmelCase = self.model(**_lowercase )
__UpperCAmelCase = model_inputs['''input_ids''']
return model_outputs
def a ( self : Optional[int] , _lowercase : List[str] , _lowercase : Optional[Any]=5 , _lowercase : Dict=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
__UpperCAmelCase = target_ids.shape[0]
__UpperCAmelCase = model_outputs['''input_ids'''][0]
__UpperCAmelCase = model_outputs['''logits''']
if self.framework == "tf":
__UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
__UpperCAmelCase = outputs.numpy()
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = stable_softmax(_lowercase , axis=-1 )
if target_ids is not None:
__UpperCAmelCase = tf.gather_nd(tf.squeeze(_lowercase , 0 ) , target_ids.reshape(-1 , 1 ) )
__UpperCAmelCase = tf.expand_dims(_lowercase , 0 )
__UpperCAmelCase = tf.math.top_k(_lowercase , k=_lowercase )
__UpperCAmelCase , __UpperCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
__UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=_lowercase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
__UpperCAmelCase = outputs[0, masked_index, :]
__UpperCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
__UpperCAmelCase = probs[..., target_ids]
__UpperCAmelCase , __UpperCAmelCase = probs.topk(_lowercase )
__UpperCAmelCase = []
__UpperCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
__UpperCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
__UpperCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
__UpperCAmelCase = target_ids[p].tolist()
__UpperCAmelCase = p
# Filter padding out:
__UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__UpperCAmelCase = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
__UpperCAmelCase = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p] ), '''sequence''': sequence}
row.append(_lowercase )
result.append(_lowercase )
if single_mask:
return result[0]
return result
def a ( self : str , _lowercase : List[Any] , _lowercase : List[Any]=None ):
if isinstance(_lowercase , _lowercase ):
__UpperCAmelCase = [targets]
try:
__UpperCAmelCase = self.tokenizer.get_vocab()
except Exception:
__UpperCAmelCase = {}
__UpperCAmelCase = []
for target in targets:
__UpperCAmelCase = vocab.get(_lowercase , _lowercase )
if id_ is None:
__UpperCAmelCase = self.tokenizer(
_lowercase , add_special_tokens=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , max_length=1 , truncation=_lowercase , )['''input_ids''']
if len(_lowercase ) == 0:
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
__UpperCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'''The specified target token `{target}` does not exist in the model vocabulary. '''
F'''Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.''' )
target_ids.append(id_ )
__UpperCAmelCase = list(set(_lowercase ) )
if len(_lowercase ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
__UpperCAmelCase = np.array(_lowercase )
return target_ids
def a ( self : int , _lowercase : Dict=None , _lowercase : Optional[Any]=None ):
__UpperCAmelCase = {}
if targets is not None:
__UpperCAmelCase = self.get_target_ids(_lowercase , _lowercase )
__UpperCAmelCase = target_ids
if top_k is not None:
__UpperCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self : Union[str, Any] , _lowercase : Optional[Any] , *_lowercase : Union[str, Any] , **_lowercase : int ):
__UpperCAmelCase = super().__call__(_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
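# A minimal usage sketch of the pipeline above (the checkpoint name is
# illustrative; any fill-mask model whose tokenizer defines a mask token works):
#
#   from transformers import pipeline
#
#   unmasker = pipeline("fill-mask", model="distilroberta-base")
#   unmasker("Paris is the <mask> of France.", top_k=3)
#   # each result dict carries "score", "token", "token_str" and "sequence",
#   # matching the postprocess() method above
#   unmasker("Paris is the <mask> of France.", targets=["capital"])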
| 86 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ :Optional[Any] = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Dict = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
lowercase__ :Optional[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
lowercase__ :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
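# The _LazyModule indirection above keeps `import transformers` cheap: nothing
# listed in _import_structure is imported until first attribute access, e.g.
#
#   from transformers.models import clap
#   clap.ClapProcessor  # only now is processing_clap actually imported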
| 101 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__: Optional[int] = logging.get_logger(__name__)
def snake_case_ ( _lowerCAmelCase : Optional[int] ) -> Optional[int]:
UpperCAmelCase : Tuple = DPTConfig(embedding_type='''hybrid''' )
if "large" in checkpoint_url:
UpperCAmelCase : Tuple = 1024
UpperCAmelCase : List[Any] = 4096
UpperCAmelCase : str = 24
UpperCAmelCase : List[Any] = 16
UpperCAmelCase : str = [5, 11, 17, 23]
UpperCAmelCase : List[Any] = [256, 512, 1024, 1024]
UpperCAmelCase : Tuple = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
UpperCAmelCase : Optional[Any] = 768
UpperCAmelCase : Tuple = [1, 1, 1, 0.5]
UpperCAmelCase : int = [256, 512, 768, 768]
UpperCAmelCase : Any = 150
UpperCAmelCase : Tuple = 16
UpperCAmelCase : Any = (1, 384, 384)
UpperCAmelCase : Optional[Any] = False
UpperCAmelCase : Tuple = '''project'''
if "ade" in checkpoint_url:
UpperCAmelCase : Any = True
UpperCAmelCase : str = 768
UpperCAmelCase : Optional[int] = [1, 1, 1, 0.5]
UpperCAmelCase : List[Any] = 150
UpperCAmelCase : List[Any] = 16
UpperCAmelCase : str = '''huggingface/label-files'''
UpperCAmelCase : Tuple = '''ade20k-id2label.json'''
UpperCAmelCase : Any = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
UpperCAmelCase : Optional[Any] = {int(_lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase : List[Any] = idalabel
UpperCAmelCase : Optional[int] = {v: k for k, v in idalabel.items()}
UpperCAmelCase : Union[str, Any] = [1, 150, 480, 480]
return config, expected_shape
def snake_case_ ( _lowerCAmelCase : Union[str, Any] ) -> int:
UpperCAmelCase : List[str] = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(_lowerCAmelCase , _lowerCAmelCase )
def snake_case_ ( _lowerCAmelCase : Tuple ) -> Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase : Tuple = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
UpperCAmelCase : Union[str, Any] = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
UpperCAmelCase : int = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
UpperCAmelCase : Tuple = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
UpperCAmelCase : Any = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
UpperCAmelCase : str = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
UpperCAmelCase : Any = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
UpperCAmelCase : Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase : Optional[Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
UpperCAmelCase : Dict = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
UpperCAmelCase : Tuple = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
UpperCAmelCase : Tuple = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
UpperCAmelCase : str = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
UpperCAmelCase : Dict = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
UpperCAmelCase : int = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
UpperCAmelCase : Tuple = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
UpperCAmelCase : int = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
UpperCAmelCase : List[str] = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCAmelCase : str = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCAmelCase : List[str] = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
UpperCAmelCase : Union[str, Any] = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
UpperCAmelCase : Any = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
UpperCAmelCase : Optional[int] = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
UpperCAmelCase : Tuple = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase : Dict = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase : int = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase : Any = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase : Optional[Any] = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase : List[Any] = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase : Any = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase : Optional[int] = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase : str = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase : List[str] = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase : Tuple = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase : int = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
UpperCAmelCase : Optional[int] = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
UpperCAmelCase : Dict = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
UpperCAmelCase : Any = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
UpperCAmelCase : Optional[int] = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
UpperCAmelCase : Union[str, Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
UpperCAmelCase : List[Any] = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
UpperCAmelCase : Optional[int] = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
UpperCAmelCase : Optional[Any] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
UpperCAmelCase : Optional[int] = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
UpperCAmelCase : List[Any] = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
UpperCAmelCase : List[str] = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
UpperCAmelCase : List[Any] = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
UpperCAmelCase : List[Any] = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
UpperCAmelCase : Tuple = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
def snake_case_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ) -> Optional[Any]:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase : Optional[int] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
UpperCAmelCase : Tuple = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase : Tuple = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase : int = in_proj_bias[: config.hidden_size]
UpperCAmelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase : str = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase : Union[str, Any] = in_proj_bias[-config.hidden_size :]
def snake_case_ ( ) -> List[str]:
UpperCAmelCase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : Optional[int] = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw )
return im
@torch.no_grad()
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] ) -> Any:
UpperCAmelCase , UpperCAmelCase : int = get_dpt_config(_lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
UpperCAmelCase : List[Any] = torch.load(_lowerCAmelCase , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(_lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase : Any = state_dict.pop(_lowerCAmelCase )
UpperCAmelCase : List[Any] = val
# read in qkv matrices
read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase )
# load HuggingFace model
UpperCAmelCase : Optional[Any] = DPTForSemanticSegmentation(_lowerCAmelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(_lowerCAmelCase )
model.load_state_dict(_lowerCAmelCase )
model.eval()
# Check outputs on an image
UpperCAmelCase : int = 480 if '''ade''' in checkpoint_url else 384
UpperCAmelCase : List[Any] = DPTImageProcessor(size=_lowerCAmelCase )
UpperCAmelCase : Dict = prepare_img()
UpperCAmelCase : Optional[int] = image_processor(_lowerCAmelCase , return_tensors='''pt''' )
# forward pass
UpperCAmelCase : Any = model(**_lowerCAmelCase ).logits if '''ade''' in checkpoint_url else model(**_lowerCAmelCase ).predicted_depth
if show_prediction:
UpperCAmelCase : Dict = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=_lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCAmelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCAmelCase )
if push_to_hub:
model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
UpperCamelCase__: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
UpperCamelCase__: Tuple = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
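# Example invocation (script name illustrative; the checkpoint URL mirrors the
# argparse default above, and the hybrid/ADE variants use the corresponding
# release URLs):
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large \
#       --model_name dpt-large --show_prediction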
| 23 | 0 |
"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( A_ ):
lowercase__ = ['''pixel_values''']
def __init__( self : Optional[Any] , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : bool = True , snake_case_ : Union[int, float] = 1 / 255 , snake_case_ : bool = True , snake_case_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , snake_case_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **snake_case_ : Any , ) -> None:
'''simple docstring'''
super().__init__(**snake_case_ )
A__ = size if size is not None else {"shortest_edge": 224}
A__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
A__ = crop_size if crop_size is not None else {"height": 224, "width": 224}
A__ = get_size_dict(snake_case_ , param_name="crop_size" )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_center_crop
A__ = crop_size
A__ = do_rescale
A__ = rescale_factor
A__ = do_normalize
A__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
A__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __magic_name__ ( self : int , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : int , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
A__ = int((256 / 224) * size["shortest_edge"] )
A__ = get_resize_output_image_size(snake_case_ , size=snake_case_ , default_to_square=snake_case_ )
A__ = {"height": output_size[0], "width": output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
snake_case_ , size=(size_dict["height"], size_dict["width"]) , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def __magic_name__ ( self : int , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Dict , ) -> np.ndarray:
'''simple docstring'''
A__ = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(snake_case_ , size=(size["height"], size["width"]) , data_format=snake_case_ , **snake_case_ )
def __magic_name__ ( self : str , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : int , ) -> np.ndarray:
'''simple docstring'''
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def __magic_name__ ( self : Optional[Any] , snake_case_ : np.ndarray , snake_case_ : Union[float, List[float]] , snake_case_ : Union[float, List[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def __magic_name__ ( self : str , snake_case_ : ImageInput , snake_case_ : Optional[bool] = None , snake_case_ : Optional[Dict[str, int]] = None , snake_case_ : PILImageResampling = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[Dict[str, int]] = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[float] = None , snake_case_ : Optional[bool] = None , snake_case_ : Optional[Union[float, Iterable[float]]] = None , snake_case_ : Optional[Union[float, Iterable[float]]] = None , snake_case_ : Optional[TensorType] = None , snake_case_ : ChannelDimension = ChannelDimension.FIRST , **snake_case_ : int , ) -> BatchFeature:
'''simple docstring'''
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_normalize if do_normalize is not None else self.do_normalize
A__ = image_mean if image_mean is not None else self.image_mean
A__ = image_std if image_std is not None else self.image_std
A__ = size if size is not None else self.size
A__ = get_size_dict(snake_case_ , default_to_square=snake_case_ )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(snake_case_ , param_name="crop_size" )
A__ = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
A__ = [self.resize(snake_case_ , snake_case_ , snake_case_ ) for image in images]
if do_center_crop:
A__ = [self.center_crop(snake_case_ , snake_case_ ) for image in images]
if do_rescale:
A__ = [self.rescale(snake_case_ , snake_case_ ) for image in images]
if do_normalize:
A__ = [self.normalize(snake_case_ , snake_case_ , snake_case_ ) for image in images]
A__ = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
A__ = {"pixel_values": images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
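# A hedged usage sketch of the preprocessing flow above. The class names in
# this dump are mangled, so the sketch uses a published processor with the
# same resize -> center-crop -> rescale -> normalize contract (the 256/224
# shortest-edge scaling matches LeViT's processor):
#
#   import numpy as np
#   from transformers import LevitImageProcessor
#
#   processor = LevitImageProcessor()
#   batch = processor(images=np.zeros((300, 400, 3), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + center crop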
| 230 |
"""simple docstring"""
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n , prec=10_00 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
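# A quick sanity check of the Miller-Rabin test above (probabilistic, so the
# examples assume the default `prec`; 561 and 1105 are Carmichael numbers,
# which a plain Fermat test would misclassify):
#
#   >>> all(is_prime_big(p) for p in (2, 3, 5, 97, 7919))
#   True
#   >>> any(is_prime_big(c) for c in (1, 4, 561, 1105))
#   False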
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = abs(int(input("Enter bound : ").strip()))
print("Here's the list of primes:")
print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 230 | 1 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def __UpperCAmelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ) -> Optional[Any]:
'''simple docstring'''
__snake_case : str = 1.5
__snake_case : List[str] = int(factor * num_class_images )
__snake_case : Optional[Any] = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCAmelCase_ , aesthetic_weight=0.1 )
os.makedirs(F"{class_data_dir}/images" , exist_ok=UpperCAmelCase_ )
if len(list(Path(F"{class_data_dir}/images" ).iterdir() ) ) >= num_class_images:
return
while True:
__snake_case : int = client.query(text=UpperCAmelCase_ )
if len(UpperCAmelCase_ ) >= factor * num_class_images or num_images > 1E4:
break
else:
__snake_case : Union[str, Any] = int(factor * num_images )
__snake_case : str = ClipClient(
url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=UpperCAmelCase_ , aesthetic_weight=0.1 , )
__snake_case : Dict = 0
__snake_case : Any = 0
__snake_case : List[str] = tqdm(desc='downloading real regularization images' , total=UpperCAmelCase_ )
with open(F"{class_data_dir}/caption.txt" , 'w' ) as fa, open(F"{class_data_dir}/urls.txt" , 'w' ) as fa, open(
F"{class_data_dir}/images.txt" , 'w' ) as fa:
while total < num_class_images:
__snake_case : Union[str, Any] = class_images[count]
count += 1
try:
__snake_case : str = requests.get(images['url'] )
if img.status_code == 2_00:
__snake_case : Dict = Image.open(BytesIO(img.content ) )
with open(F"{class_data_dir}/images/{total}.jpg" , 'wb' ) as f:
f.write(img.content )
                    fa.write(images['caption'] + '\n' )
                    fb.write(images['url'] + '\n' )
                    fc.write(F"{class_data_dir}/images/{total}.jpg" + '\n' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def __UpperCAmelCase ( ) -> Tuple:
'''simple docstring'''
__snake_case : str = argparse.ArgumentParser('' , add_help=UpperCAmelCase_ )
parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=UpperCAmelCase_ , type=UpperCAmelCase_ )
parser.add_argument('--class_data_dir' , help='path to save images' , required=UpperCAmelCase_ , type=UpperCAmelCase_ )
parser.add_argument('--num_class_images' , help='number of images to download' , default=2_00 , type=UpperCAmelCase_ )
return parser.parse_args()
if __name__ == "__main__":
_a : Optional[Any]= parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
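# Example invocation (script name illustrative; requires `pip install clip-retrieval`):
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/samples_dog --num_class_images 200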
| 172 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int
UpperCAmelCase : jnp.dtype = jnp.floataa
def _lowercase (self : Any) -> Optional[int]:
__snake_case : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : Any , _A : Any) -> str:
__snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = hidden_states.shape
__snake_case : Union[str, Any] = jax.image.resize(
_A , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
__snake_case : List[Any] = self.conv(_A)
return hidden_states
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int
UpperCAmelCase : jnp.dtype = jnp.floataa
def _lowercase (self : Optional[Any]) -> List[Any]:
__snake_case : Dict = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__(self : int , _A : str) -> Any:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__snake_case : Union[str, Any] = self.conv(_A)
return hidden_states
class UpperCamelCase ( nn.Module ):
UpperCAmelCase : int
UpperCAmelCase : int = None
UpperCAmelCase : float = 0.0
UpperCAmelCase : bool = None
UpperCAmelCase : jnp.dtype = jnp.floataa
def _lowercase (self : List[str]) -> Dict:
__snake_case : str = self.in_channels if self.out_channels is None else self.out_channels
__snake_case : Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__snake_case : str = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case : Optional[int] = nn.Dense(_A , dtype=self.dtype)
__snake_case : int = nn.GroupNorm(num_groups=32 , epsilon=1E-5)
__snake_case : str = nn.Dropout(self.dropout_prob)
__snake_case : Dict = nn.Conv(
_A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__snake_case : List[str] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__snake_case : Optional[Any] = None
if use_nin_shortcut:
__snake_case : List[str] = nn.Conv(
_A , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__(self : List[Any] , _A : Union[str, Any] , _A : str , _A : int=True) -> Any:
__snake_case : List[Any] = hidden_states
__snake_case : Optional[Any] = self.norma(_A)
__snake_case : int = nn.swish(_A)
__snake_case : Optional[int] = self.conva(_A)
__snake_case : Dict = self.time_emb_proj(nn.swish(_A))
__snake_case : List[str] = jnp.expand_dims(jnp.expand_dims(_A , 1) , 1)
__snake_case : Any = hidden_states + temb
__snake_case : Tuple = self.norma(_A)
__snake_case : Dict = nn.swish(_A)
__snake_case : Union[str, Any] = self.dropout(_A , _A)
__snake_case : Union[str, Any] = self.conva(_A)
if self.conv_shortcut is not None:
__snake_case : List[Any] = self.conv_shortcut(_A)
return hidden_states + residual
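# Shape sanity check for the nearest-neighbour upsampling used above (a
# sketch; Flax convolutions are channels-last, so tensors are NHWC):
#
#   import jax
#   import jax.numpy as jnp
#
#   x = jnp.zeros((1, 8, 8, 32))
#   jax.image.resize(x, shape=(1, 16, 16, 32), method="nearest").shape
#   # -> (1, 16, 16, 32): height and width double, channels untouched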
| 172 | 1 |
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_SCREAMING_SNAKE_CASE : Dict = "src/transformers"
_SCREAMING_SNAKE_CASE : Dict = "docs/source/en"
_SCREAMING_SNAKE_CASE : int = "."
def UpperCamelCase_( snake_case : Any , snake_case : Union[str, Any] , snake_case : Optional[Any] ):
'''simple docstring'''
with open(snake_case , "r" , encoding="utf-8" , newline="\n" ) as f:
snake_case_ = f.readlines()
# Find the start prompt.
snake_case_ = 0
while not lines[start_index].startswith(snake_case ):
start_index += 1
start_index += 1
snake_case_ = start_index
while not lines[end_index].startswith(snake_case ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_SCREAMING_SNAKE_CASE : str = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_SCREAMING_SNAKE_CASE : str = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_SCREAMING_SNAKE_CASE : Dict = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE : Dict = direct_transformers_import(TRANSFORMERS_PATH)
def UpperCamelCase_( snake_case : str ):
'''simple docstring'''
snake_case_ = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , snake_case )
return [m.group(0 ) for m in matches]
def UpperCamelCase_( snake_case : Any , snake_case : Any ):
'''simple docstring'''
snake_case_ = 2 if text == "✅" or text == "❌" else len(snake_case )
snake_case_ = (width - text_length) // 2
snake_case_ = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
snake_case_ = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
snake_case_ = {name: config.replace("Config" , "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
snake_case_ = collections.defaultdict(snake_case )
snake_case_ = collections.defaultdict(snake_case )
snake_case_ = collections.defaultdict(snake_case )
snake_case_ = collections.defaultdict(snake_case )
snake_case_ = collections.defaultdict(snake_case )
# Let's lookup through all transformers object (once).
for attr_name in dir(snake_case ):
snake_case_ = None
if attr_name.endswith("Tokenizer" ):
snake_case_ = slow_tokenizers
snake_case_ = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
snake_case_ = fast_tokenizers
snake_case_ = attr_name[:-1_3]
elif _re_tf_models.match(snake_case ) is not None:
snake_case_ = tf_models
snake_case_ = _re_tf_models.match(snake_case ).groups()[0]
elif _re_flax_models.match(snake_case ) is not None:
snake_case_ = flax_models
snake_case_ = _re_flax_models.match(snake_case ).groups()[0]
elif _re_pt_models.match(snake_case ) is not None:
snake_case_ = pt_models
snake_case_ = _re_pt_models.match(snake_case ).groups()[0]
if lookup_dict is not None:
while len(snake_case ) > 0:
if attr_name in model_name_to_prefix.values():
snake_case_ = True
break
# Try again after removing the last word in the name
snake_case_ = "".join(camel_case_split(snake_case )[:-1] )
# Let's build that table!
snake_case_ = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
snake_case_ = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
snake_case_ = [len(snake_case ) + 2 for c in columns]
snake_case_ = max([len(snake_case ) for name in model_names] ) + 2
# Build the table per se
snake_case_ = "|" + "|".join([_center_text(snake_case , snake_case ) for c, w in zip(snake_case , snake_case )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
snake_case_ = {True: "✅", False: "❌"}
for name in model_names:
snake_case_ = model_name_to_prefix[name]
snake_case_ = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(snake_case , snake_case ) for l, w in zip(snake_case , snake_case )] ) + "|\n"
return table
def UpperCamelCase_( snake_case : Any=False ):
'''simple docstring'''
snake_case_ , snake_case_ , snake_case_ , snake_case_ = _find_text_in_file(
filename=os.path.join(snake_case , "index.md" ) , start_prompt="<!--This table is updated automatically from the auto modules" , end_prompt="<!-- End table-->" , )
snake_case_ = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(snake_case , "index.md" ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
check_model_table(args.fix_and_overwrite)
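# Typical usage, from the repository root as the header comment notes:
#
#   python utils/check_table.py                      # fail if the table is stale
#   python utils/check_table.py --fix_and_overwrite  # regenerate it in docs/source/en/index.md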
| 92 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str , starting_point: complex , variable: str = "x" , precision: float = 1_0**-1_0 , multiplicity: int = 1 , ):
    '''simple docstring'''
    symbol = symbols(variable )
    func = lambdify(symbol , function )
    diff_function = lambdify(symbol , diff(function , symbol ) )
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess ) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess ) / diff_function(
                prev_guess )
        else:
            raise ZeroDivisionError("Could not find root" ) from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess ) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(F"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}")
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
F"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
F"{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}",
)
# Find root of cos(x)
print(F"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 92 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : List[str] = logging.get_logger(__name__)
lowercase : Union[str, Any] = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __snake_case ( __snake_case ):
_a : Optional[int]= "donut-swin"
_a : Union[str, Any]= {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self ,snake_case=224 ,snake_case=4 ,snake_case=3 ,snake_case=96 ,snake_case=[2, 2, 6, 2] ,snake_case=[3, 6, 12, 24] ,snake_case=7 ,snake_case=4.0 ,snake_case=True ,snake_case=0.0 ,snake_case=0.0 ,snake_case=0.1 ,snake_case="gelu" ,snake_case=False ,snake_case=0.02 ,snake_case=1e-5 ,**snake_case ,):
'''simple docstring'''
super().__init__(**lowerCamelCase_ )
lowercase : Any = image_size
lowercase : Optional[Any] = patch_size
lowercase : Union[str, Any] = num_channels
lowercase : List[Any] = embed_dim
lowercase : Dict = depths
lowercase : List[Any] = len(lowerCamelCase_ )
lowercase : Optional[Any] = num_heads
lowercase : Optional[Any] = window_size
lowercase : Optional[Any] = mlp_ratio
lowercase : Any = qkv_bias
lowercase : Union[str, Any] = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : str = drop_path_rate
lowercase : Dict = hidden_act
lowercase : List[Any] = use_absolute_embeddings
lowercase : Optional[int] = layer_norm_eps
lowercase : Any = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase : Any = int(embed_dim * 2 ** (len(lowerCamelCase_ ) - 1) )
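# Usage sketch (values mirror the defaults above; the derived hidden_size is
# embed_dim * 2 ** (num_layers - 1) = 96 * 8 = 768):
#
#   from transformers import DonutSwinConfig, DonutSwinModel
#
#   config = DonutSwinConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
#   model = DonutSwinModel(config)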
| 20 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
    '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
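# Usage sketch (assuming the ErnieMConfig restored above):
#     config = ErnieMConfig(vocab_size=250002, hidden_size=768)
#     config.num_classes  # resolves to config.num_labels through attribute_map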
| 345 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 350 |
"""simple docstring"""
def is_even(number: int) -> bool:
    """Return True when `number` is even, via a bitwise check of the lowest bit.

    >>> is_even(1)
    False
    >>> is_even(4)
    True
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 79 | 0 |
def optimal_merge_pattern(files: list) -> float:
    """Return the minimum total cost of merging all files into one."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider the two files with minimum size to be merged next
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
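# Worked example for the greedy merge above (assuming optimal_merge_pattern as
# defined): files = [2, 3, 4] -> merge 2+3 at cost 5, then 5+4 at cost 9,
# so the optimal total cost is 14.
#     optimal_merge_pattern([2, 3, 4])  # == 14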
if __name__ == "__main__":
import doctest
doctest.testmod()
| 117 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
        'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'VivitModel',
        'VivitPreTrainedModel',
        'VivitForVideoClassification',
    ]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
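# Note on the lazy-import pattern above (a sketch of the behavior, not an API
# guarantee): importing this module stays cheap because the heavy torch/vision
# submodules are only imported on first attribute access, when _LazyModule
# resolves a name listed in _import_structure via its __getattr__.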
| 117 | 1 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 279 |
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = '''__DUMMY_TRANSFORMERS_USER__'''
CI_HUB_USER_FULL_NAME = '''Dummy User'''
CI_HUB_USER_TOKEN = '''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'''

CI_HUB_ENDPOINT = '''https://hub-ci.huggingface.co'''
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + '''/datasets/{repo_id}/resolve/{revision}/{path}'''
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + '''/{repo_id}/resolve/{revision}/{filename}'''
CI_HUB_TOKEN_PATH = Path('''~/.huggingface/hub_ci_token''').expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
| 279 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_DESCRIPTION = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )
    def _download_and_prepare(self, dl_manager):
        # check that the config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
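# Usage sketch (hedged; mirrors the example in _KWARGS_DESCRIPTION above):
#     bleurt = datasets.load_metric("bleurt", "bleurt-tiny-128")
#     results = bleurt.compute(predictions=["hello there"], references=["hello there"])
#     print([round(v, 2) for v in results["scores"]])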
| 184 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm for the greatest common divisor."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return x such that (a * x) % m == 1, via the extended Euclidean algorithm."""
    if greatest_common_divisor(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
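# Worked example (assuming mod_inverse above): 3 * 5 == 15 and 15 % 7 == 1,
# so mod_inverse(3, 7) == 5. Since gcd(4, 8) != 1, mod_inverse(4, 8) raises
# ValueError instead of returning a value.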
| 184 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 363 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-config')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-config-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-config')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('test-config', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-config')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id='test-config', push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F'{USER}/test-config')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token)

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-config-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id='valid_org/test-config-org', push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub('test-dynamic-config', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'})

        new_config = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, 'CustomConfig')
        self.assertEqual(new_config.attribute, 42)


class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}'
        )
        self.assertEqual(n_embd, c.n_embd, 'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop, c.resid_pdrop, 'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, 'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type, c.summary_type, 'mismatch for key: summary_type')

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version']
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F' {", ".join(keys_with_defaults)}.'
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')

        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert')

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json'
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        configuration.configuration_files = ['config.4.0.0.json']

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, 'config.4.0.0.json'), 'w'))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, 'config.4.0.0.json'), os.path.join(tmp_dir, 'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 288 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """clusters"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 18, """width""": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"""height""": 42, """width""": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """image_processor.json""")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("""ImageGPT requires clusters at initialization""")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""", split="""test""")

    image1 = Image.open(dataset[4]["""file"""])
    image2 = Image.open(dataset[5]["""file"""])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="""pt""")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="""pt""")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 37 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks

    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = F"""down_blocks.{i}.resnets.{j}."""
        sd_down_res_prefix = F"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = F"""down_blocks.{i}.attentions.{j}."""
            sd_down_atn_prefix = F"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = F"""up_blocks.{i}.resnets.{j}."""
        sd_up_res_prefix = F"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = F"""up_blocks.{i}.attentions.{j}."""
            sd_up_atn_prefix = F"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = F"""down_blocks.{i}.downsamplers.0.conv."""
        sd_downsample_prefix = F"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = F"""up_blocks.{i}.upsamplers.0."""
        sd_upsample_prefix = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = F"""mid_block.resnets.{j}."""
    sd_mid_res_prefix = F"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
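# Illustrative result of the maps above: the HF Diffusers key
# "down_blocks.0.resnets.0.norm1.weight" becomes the SD key
# "input_blocks.1.0.in_layers.0.weight" once the prefix and resnet
# substitutions in convert_unet_state_dict (below) are applied.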
def convert_unet_state_dict(unet_state_dict):
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = F"""encoder.down_blocks.{i}.resnets.{j}."""
        sd_down_prefix = F"""encoder.down.{i}.block.{j}."""
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = F"""down_blocks.{i}.downsamplers.0."""
        sd_downsample_prefix = F"""down.{i}.downsample."""
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = F"""up_blocks.{i}.upsamplers.0."""
        sd_upsample_prefix = F"""up.{3-i}.upsample."""
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = F"""decoder.up_blocks.{i}.resnets.{j}."""
        sd_up_prefix = F"""decoder.up.{3-i}.block.{j}."""
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = F"""mid_block.resnets.{i}."""
    sd_mid_res_prefix = F"""mid.block_{i+1}."""
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert an HF linear weight of shape (out, in) into the (out, in, 1, 1)
    # layout that SD stores for its 1x1 conv attention projections
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {'''q''': 0, '''k''': 1, '''v''': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(""".self_attn.q_proj.weight""")
            or k.endswith(""".self_attn.k_proj.weight""")
            or k.endswith(""".self_attn.v_proj.weight""")
        ):
            k_pre = k[: -len(""".q_proj.weight""")]
            k_code = k[-len("""q_proj.weight""")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(""".self_attn.q_proj.bias""")
            or k.endswith(""".self_attn.k_proj.bias""")
            or k.endswith(""".self_attn.v_proj.bias""")
        ):
            k_pre = k[: -len(""".q_proj.bias""")]
            k_code = k[-len("""q_proj.bias""")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + """.in_proj_weight"""] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + """.in_proj_bias"""] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
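# The v1 text encoder needs no key surgery: its HF CLIP keys already match what
# SD v1 checkpoints store. Only SD v2 (OpenCLIP) packs the q/k/v projections
# into single in_proj_weight/in_proj_bias tensors, which is what
# convert_text_enc_state_dict_v20 reassembles above.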
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--use_safetensors''', action='''store_true''', help='''Save weights use safetensors, default is ckpt.'''
)
_lowerCAmelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
_lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
_lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
_lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
_lowerCAmelCase = load_file(unet_path, device='''cpu''')
else:
_lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
_lowerCAmelCase = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
_lowerCAmelCase = load_file(vae_path, device='''cpu''')
else:
_lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
_lowerCAmelCase = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
_lowerCAmelCase = load_file(text_enc_path, device='''cpu''')
else:
_lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
_lowerCAmelCase = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
_lowerCAmelCase = convert_unet_state_dict(unet_state_dict)
_lowerCAmelCase = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
_lowerCAmelCase = convert_vae_state_dict(vae_state_dict)
_lowerCAmelCase = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
_lowerCAmelCase = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
_lowerCAmelCase = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
_lowerCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
_lowerCAmelCase = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
_lowerCAmelCase = convert_text_enc_state_dict(text_enc_dict)
_lowerCAmelCase = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
_lowerCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
_lowerCAmelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
_lowerCAmelCase = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
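# Hypothetical invocation (the script name and paths are placeholders; the flags
# match the argparse definitions above):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.ckpt --half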
| 37 | 1 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : List[Any] =logging.get_logger(__name__)
UpperCAmelCase__ : Dict ={
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class __A ( a ):
__A = """encodec"""
def __init__( self , UpperCAmelCase_=[1.5, 3.0, 6.0, 1_2.0, 2_4.0] , UpperCAmelCase_=24000 , UpperCAmelCase_=1 , UpperCAmelCase_=False , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=128 , UpperCAmelCase_=32 , UpperCAmelCase_=1 , UpperCAmelCase_=[8, 5, 4, 2] , UpperCAmelCase_="weight_norm" , UpperCAmelCase_=7 , UpperCAmelCase_=7 , UpperCAmelCase_=3 , UpperCAmelCase_=2 , UpperCAmelCase_=True , UpperCAmelCase_="reflect" , UpperCAmelCase_=2 , UpperCAmelCase_=2 , UpperCAmelCase_=1.0 , UpperCAmelCase_=1024 , UpperCAmelCase_=None , UpperCAmelCase_=True , **UpperCAmelCase_ , ):
lowerCamelCase =target_bandwidths
lowerCamelCase =sampling_rate
lowerCamelCase =audio_channels
lowerCamelCase =normalize
lowerCamelCase =chunk_length_s
lowerCamelCase =overlap
lowerCamelCase =hidden_size
lowerCamelCase =num_filters
lowerCamelCase =num_residual_layers
lowerCamelCase =upsampling_ratios
lowerCamelCase =norm_type
lowerCamelCase =kernel_size
lowerCamelCase =last_kernel_size
lowerCamelCase =residual_kernel_size
lowerCamelCase =dilation_growth_rate
lowerCamelCase =use_causal_conv
lowerCamelCase =pad_mode
lowerCamelCase =compress
lowerCamelCase =num_lstm_layers
lowerCamelCase =trim_right_ratio
lowerCamelCase =codebook_size
lowerCamelCase =codebook_dim if codebook_dim is not None else hidden_size
lowerCamelCase =use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**UpperCAmelCase_ )
@property
def _snake_case ( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _snake_case ( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _snake_case ( self ):
lowerCamelCase =np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
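    # With the defaults above, hop_length = 8 * 5 * 4 * 2 = 320 samples, so
    # frame_rate = ceil(24000 / 320) = 75 frames per second.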
@property
def _snake_case ( self ):
return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
| 262 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
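# Conceptual sketch of what LocalSGD does (hedged; the real implementation lives
# in accelerate.local_sgd and is used verbatim further below):
#
#     with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8) as local_sgd:
#         for batch in dataloader:
#             ...  # forward, backward, optimizer.step()
#             local_sgd.step()  # every 8 calls, parameters are averaged across workers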
UpperCAmelCase__ : Union[str, Any] =16
UpperCAmelCase__ : Any =32
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase = 16 ) -> int:
lowerCamelCase =AutoTokenizer.from_pretrained("""bert-base-cased""" )
lowerCamelCase =load_dataset("""glue""" , """mrpc""" )
def tokenize_function(_UpperCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase =datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase =1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase =16
elif accelerator.mixed_precision != "no":
lowerCamelCase =8
else:
lowerCamelCase =None
return tokenizer.pad(
_UpperCAmelCase , padding="""longest""" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
lowerCamelCase =DataLoader(
tokenized_datasets["""train"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
lowerCamelCase =DataLoader(
tokenized_datasets["""validation"""] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
UpperCAmelCase__ : Dict =mocked_dataloaders # noqa: F811
def _lowercase ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , _UpperCAmelCase ) == "1":
lowerCamelCase =2
# New Code #
lowerCamelCase =int(args.gradient_accumulation_steps )
lowerCamelCase =int(args.local_sgd_steps )
# Initialize accelerator
lowerCamelCase =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase =config["""lr"""]
lowerCamelCase =int(config["""num_epochs"""] )
lowerCamelCase =int(config["""seed"""] )
lowerCamelCase =int(config["""batch_size"""] )
lowerCamelCase =evaluate.load("""glue""" , """mrpc""" )
set_seed(_UpperCAmelCase )
lowerCamelCase , lowerCamelCase =get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase =model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase =AdamW(params=model.parameters() , lr=_UpperCAmelCase )
# Instantiate scheduler
lowerCamelCase =get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
with LocalSGD(
accelerator=_UpperCAmelCase , model=_UpperCAmelCase , local_sgd_steps=_UpperCAmelCase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs, nor do we advise using them, as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCAmelCase ):
lowerCamelCase =model(**_UpperCAmelCase )
lowerCamelCase =output.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase =model(**_UpperCAmelCase )
lowerCamelCase =outputs.logits.argmax(dim=-1 )
lowerCamelCase , lowerCamelCase =accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
lowerCamelCase =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , _UpperCAmelCase )
def _lowercase ( ) -> Any:
lowerCamelCase =argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=_UpperCAmelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=_UpperCAmelCase , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
lowerCamelCase =parser.parse_args()
lowerCamelCase ={"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 262 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__UpperCamelCase = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class UpperCamelCase ( lowerCAmelCase__ ):
def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int:
super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
requires_backends(self, 'vision')
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
def a_ ( self, lowerCAmelCase__=None) -> Dict:
snake_case_ = {}
if top_k is not None:
snake_case_ = top_k
return {}, {}, postprocess_params
def __call__( self, lowerCAmelCase__, **lowerCAmelCase__) -> List[Any]:
return super().__call__(lowerCAmelCase__, **lowerCAmelCase__)
def a_ ( self, lowerCAmelCase__) -> Tuple:
snake_case_ = load_image(lowerCAmelCase__)
snake_case_ = self.image_processor(images=lowerCAmelCase__, return_tensors=self.framework)
return model_inputs
def a_ ( self, lowerCAmelCase__) -> str:
snake_case_ = self.model(**lowerCAmelCase__)
return model_outputs
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=5) -> Tuple:
if top_k > self.model.config.num_labels:
snake_case_ = self.model.config.num_labels
if self.framework == "pt":
snake_case_ = model_outputs.logits.softmax(-1)[0]
snake_case_ , snake_case_ = probs.topk(lowerCAmelCase__)
elif self.framework == "tf":
snake_case_ = stable_softmax(model_outputs.logits, axis=-1)[0]
snake_case_ = tf.math.top_k(lowerCAmelCase__, k=lowerCAmelCase__)
snake_case_ , snake_case_ = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}')
snake_case_ = scores.tolist()
snake_case_ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase__, lowerCAmelCase__)]
| 69 |
def lowerCAmelCase__ ( lowerCamelCase_ : list[list[float]]):
'''simple docstring'''
lowerCAmelCase__ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(lowerCamelCase_):
if len(lowerCamelCase_) < i + 1:
data_lists.append([])
data_lists[i].append(float(lowerCamelCase_))
return data_lists
def lowerCAmelCase__ ( lowerCamelCase_ : list[list[float]] ,lowerCamelCase_ : list[int]):
'''simple docstring'''
lowerCAmelCase__ : list[list[float]] = []
for dlist, weight in zip(lowerCamelCase_ ,lowerCamelCase_):
lowerCAmelCase__ : str = min(lowerCamelCase_)
lowerCAmelCase__ : Optional[int] = max(lowerCamelCase_)
lowerCAmelCase__ : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)))
except ZeroDivisionError:
score.append(1)
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind))
except ZeroDivisionError:
score.append(0)
# weight not 0 or 1
else:
lowerCAmelCase__ : Optional[int] = f"""Invalid weight of {weight:f} provided"""
raise ValueError(lowerCamelCase_)
score_lists.append(lowerCamelCase_)
return score_lists
def lowerCAmelCase__ ( lowerCamelCase_ : list[list[float]]):
'''simple docstring'''
lowerCAmelCase__ : list[float] = [0 for i in range(len(score_lists[0]))]
for slist in score_lists:
for j, ele in enumerate(lowerCamelCase_):
lowerCAmelCase__ : str = final_scores[j] + ele
return final_scores
def lowerCAmelCase__ ( lowerCamelCase_ : list[list[float]] ,lowerCamelCase_ : list[int]):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = get_data(lowerCamelCase_)
lowerCAmelCase__ : Dict = calculate_each_score(lowerCamelCase_ ,lowerCamelCase_)
lowerCAmelCase__ : Union[str, Any] = generate_final_scores(lowerCamelCase_)
# append scores to source data
for i, ele in enumerate(lowerCamelCase_):
source_data[i].append(lowerCamelCase_)
return source_data
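# Worked example (a hedged sketch of the flow above): with
#   source_data = [[20.0, 2.0, 90.0], [25.0, 4.0, 75.0]] and weights = [0, 0, 1]
# (weight 0 = lower is better, weight 1 = higher is better), min/max
# normalization scores the rows [1, 0] in every column, the summed final
# scores are [3.0, 0.0], and each row gets its score appended:
#   [[20.0, 2.0, 90.0, 3.0], [25.0, 4.0, 75.0, 0.0]]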
| 129 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : Any = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
__A : List[Any] = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
__A : Any = '''</w>'''
__A : Union[str, Any] = '''@@ '''
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Any = set()
lowerCAmelCase : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase : Any = char
return pairs
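# e.g. get_pairs(("l", "o", "w", "</w>")) -> {("l", "o"), ("o", "w"), ("w", "</w>")}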
# Speech2Text2 has no max input length
__A : str = {'''facebook/s2t-wav2vec2-large-en-de''': 1024}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : int = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Any = ["input_ids", "attention_mask"]
def __init__( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]="<s>" , UpperCAmelCase_ : int="<pad>" , UpperCAmelCase_ : List[str]="</s>" , UpperCAmelCase_ : List[str]="<unk>" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : Union[str, Any] , ):
super().__init__(
unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , do_lower_case=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCAmelCase : Tuple = do_lower_case
with open(UpperCAmelCase_ , encoding='utf-8' ) as vocab_handle:
lowerCAmelCase : List[str] = json.load(UpperCAmelCase_ )
lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
lowerCAmelCase : str = None
lowerCAmelCase : Tuple = None
else:
with open(UpperCAmelCase_ , encoding='utf-8' ) as merges_handle:
lowerCAmelCase : Tuple = merges_handle.read().split('\n' )[:-1]
lowerCAmelCase : Dict = [tuple(merge.split()[:2] ) for merge in merges]
lowerCAmelCase : List[Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
lowerCAmelCase : Optional[int] = {}
@property
def lowercase__ ( self : str ):
return len(self.decoder )
def lowercase__ ( self : int ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : Optional[int] ):
lowerCAmelCase : Any = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowerCAmelCase : int = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
lowerCAmelCase : List[str] = min(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : self.bpe_ranks.get(UpperCAmelCase_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase , lowerCAmelCase : Optional[Any] = bigram
lowerCAmelCase : List[str] = []
lowerCAmelCase : Tuple = 0
while i < len(UpperCAmelCase_ ):
try:
lowerCAmelCase : List[str] = word.index(UpperCAmelCase_ , UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase : Dict = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase : Optional[Any] = tuple(UpperCAmelCase_ )
lowerCAmelCase : Tuple = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
lowerCAmelCase : Dict = get_pairs(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = ' '.join(UpperCAmelCase_ )
if word == "\n " + BPE_TOKEN_MERGES:
lowerCAmelCase : Dict = '\n' + BPE_TOKEN_MERGES
if word.endswith(UpperCAmelCase_ ):
lowerCAmelCase : Optional[int] = word.replace(UpperCAmelCase_ , '' )
lowerCAmelCase : Tuple = word.replace(' ' , UpperCAmelCase_ )
lowerCAmelCase : Dict = word
return word
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : str ):
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding. '
'Make sure to provide a `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
lowerCAmelCase : Optional[Any] = text.lower()
lowerCAmelCase : Optional[Any] = text.split()
lowerCAmelCase : Optional[int] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(' ' ) ) )
return split_tokens
def lowercase__ ( self : List[str] , UpperCAmelCase_ : str ):
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token ) )
def lowercase__ ( self : Any , UpperCAmelCase_ : int ):
lowerCAmelCase : Union[str, Any] = self.decoder.get(UpperCAmelCase_ , self.unk_token )
return result
def lowercase__ ( self : int , UpperCAmelCase_ : List[str] ):
lowerCAmelCase : List[Any] = ' '.join(UpperCAmelCase_ )
# make sure @@ tokens are concatenated
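# e.g. ["hel@@", "lo", "wor@@", "ld"] -> "hel@@ lo wor@@ ld" -> "hello world"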
lowerCAmelCase : int = ''.join(string.split(UpperCAmelCase_ ) )
return string
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
lowerCAmelCase : int = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase : List[Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_ ) + '\n' )
lowerCAmelCase : Union[str, Any] = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
lowerCAmelCase : List[str] = token_index
writer.write(' '.join(UpperCAmelCase_ ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 323 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__A : List[Any] = trt.Logger(trt.Logger.WARNING)
__A : Optional[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__A : List[Any] = logging.getLogger(__name__)
__A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to the ONNX model.''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''Number of worker processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
__A : List[str] = parser.parse_args()
if args.tokenizer_name:
__A : List[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
__A : List[Any] = args.per_device_eval_batch_size
__A : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__A : Any = True
__A : Union[str, Any] = '''temp_engine/bert-fp32.engine'''
if args.fp16:
__A : List[str] = '''temp_engine/bert-fp16.engine'''
if args.int8:
__A : Dict = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
__A : Optional[int] = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__A : str = [network.get_input(i) for i in range(network.num_inputs)]
__A : Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__A : Dict = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fp16:
    config.set_flag(trt.BuilderFlag.FP16)
if args.int8:
    config.set_flag(trt.BuilderFlag.INT8)
__A : List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__A : Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase ) -> Any:
'''simple docstring'''
lowerCAmelCase : Dict = np.asarray(inputs['input_ids'], dtype=np.int32 )
lowerCAmelCase : Optional[int] = np.asarray(inputs['attention_mask'], dtype=np.int32 )
lowerCAmelCase : Dict = np.asarray(inputs['token_type_ids'], dtype=np.int32 )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), _UpperCAmelCase )
cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), _UpperCAmelCase )
# start time
lowerCAmelCase : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(d_inp ) for d_inp in d_inputs] + [int(_UpperCAmelCase ), int(_UpperCAmelCase )], stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
cuda.memcpy_dtoh_async(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
lowerCAmelCase : List[str] = time.time()
lowerCAmelCase : Tuple = end_time - start_time
lowerCAmelCase : Union[str, Any] = (h_output0, h_output1)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__A : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A : Union[str, Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__A : int = raw_datasets['''validation'''].column_names
__A : int = '''question''' if '''question''' in column_names else column_names[0]
__A : List[str] = '''context''' if '''context''' in column_names else column_names[1]
__A : int = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__A : str = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
__A : Union[str, Any] = min(args.max_seq_length, tokenizer.model_max_length)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> Tuple:
'''simple docstring'''
lowerCAmelCase : Any = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=_UpperCAmelCase, stride=args.doc_stride, return_overflowing_tokens=_UpperCAmelCase, return_offsets_mapping=_UpperCAmelCase, padding='max_length', )
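# e.g. with max_seq_length=384 and doc_stride=128, a single question over a ~600-token context is typically split into two overlapping features.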
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
lowerCAmelCase : List[Any] = tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
lowerCAmelCase : Tuple = []
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
lowerCAmelCase : Optional[Any] = tokenized_examples.sequence_ids(_UpperCAmelCase )
lowerCAmelCase : Optional[int] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
lowerCAmelCase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
lowerCAmelCase : List[Any] = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
__A : int = raw_datasets['''validation''']
# Validation Feature Creation
__A : Any = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
__A : List[str] = default_data_collator
__A : int = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
__A : Union[str, Any] = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase="eval" ) -> int:
'''simple docstring'''
lowerCAmelCase : str = postprocess_qa_predictions(
examples=_UpperCAmelCase, features=_UpperCAmelCase, predictions=_UpperCAmelCase, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=_UpperCAmelCase, )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
lowerCAmelCase : Union[str, Any] = [
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
lowerCAmelCase : List[Any] = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
lowerCAmelCase : Optional[Any] = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_UpperCAmelCase, label_ids=_UpperCAmelCase )
__A : List[Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_UpperCAmelCase ) ) * engine.get_binding_dtype(_UpperCAmelCase ).itemsize
# Allocate device memory for inputs and outputs.
__A : List[str] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__A : Optional[int] = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
__A : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
__A : Tuple = cuda.mem_alloc(h_output0.nbytes)
__A : Tuple = cuda.mem_alloc(h_output1.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__A : Union[str, Any] = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
__A : Union[str, Any] = 0.0
__A : Optional[Any] = 0
__A : Optional[Any] = timeit.default_timer()
__A : Optional[int] = None
for step, batch in enumerate(eval_dataloader):
__A , __A : str = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
__A , __A : str = outputs
__A : Optional[Any] = torch.tensor(start_logits)
__A : Any = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
__A : List[Any] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__A : int = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__A : Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__A : int = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__A : str = nested_truncate(all_preds, len(eval_dataset))
__A : Any = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
__A : List[Any] = post_processing_function(eval_examples, eval_dataset, all_preds)
__A : str = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 323 | 1 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
UpperCAmelCase : Optional[Any] = 'CompVis/stable-diffusion-v1-1'
UpperCAmelCase : Dict = 'CompVis/stable-diffusion-v1-2'
UpperCAmelCase : Optional[int] = 'CompVis/stable-diffusion-v1-3'
UpperCAmelCase : Union[str, Any] = 'CompVis/stable-diffusion-v1-4'
class lowerCamelCase__ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Tuple = True , ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : List[str] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ )
__UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ )
__UpperCAmelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ )
__UpperCAmelCase : int = StableDiffusionPipeline(
vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , requires_safety_checker=lowerCAmelCase__ , )
self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
@property
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return {k: getattr(self , lowerCAmelCase__ ) for k in self.config.keys() if not k.startswith("""_""" )}
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[int] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__UpperCAmelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def lowerCamelCase__ ( self : int ):
'''simple docstring'''
self.enable_attention_slicing(lowerCAmelCase__ )
@torch.no_grad()
def lowerCamelCase__ ( self : int , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] = 512 , UpperCamelCase : Dict = 512 , UpperCamelCase : int = 50 , UpperCamelCase : Any = 7.5 , UpperCamelCase : Dict = None , UpperCamelCase : Any = 1 , UpperCamelCase : str = 0.0 , UpperCamelCase : List[str] = None , UpperCamelCase : List[str] = None , UpperCamelCase : int = "pil" , UpperCamelCase : Any = True , UpperCamelCase : str = None , UpperCamelCase : Dict = 1 , **UpperCamelCase : str , ):
'''simple docstring'''
return self.pipe1(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
@torch.no_grad()
def lowerCamelCase__ ( self : List[str] , UpperCamelCase : int , UpperCamelCase : Dict = 512 , UpperCamelCase : str = 512 , UpperCamelCase : Union[str, Any] = 50 , UpperCamelCase : Optional[Any] = 7.5 , UpperCamelCase : Dict = None , UpperCamelCase : Tuple = 1 , UpperCamelCase : int = 0.0 , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[Any] = None , UpperCamelCase : Optional[int] = "pil" , UpperCamelCase : Optional[Any] = True , UpperCamelCase : Union[str, Any] = None , UpperCamelCase : str = 1 , **UpperCamelCase : Union[str, Any] , ):
'''simple docstring'''
return self.pipe2(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
@torch.no_grad()
def lowerCamelCase__ ( self : Any , UpperCamelCase : str , UpperCamelCase : List[Any] = 512 , UpperCamelCase : Dict = 512 , UpperCamelCase : Union[str, Any] = 50 , UpperCamelCase : int = 7.5 , UpperCamelCase : Tuple = None , UpperCamelCase : Dict = 1 , UpperCamelCase : str = 0.0 , UpperCamelCase : Union[str, Any] = None , UpperCamelCase : int = None , UpperCamelCase : Union[str, Any] = "pil" , UpperCamelCase : str = True , UpperCamelCase : Tuple = None , UpperCamelCase : List[Any] = 1 , **UpperCamelCase : Tuple , ):
'''simple docstring'''
return self.pipe3(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
@torch.no_grad()
def lowerCamelCase__ ( self : int , UpperCamelCase : Dict , UpperCamelCase : Optional[int] = 512 , UpperCamelCase : str = 512 , UpperCamelCase : Optional[Any] = 50 , UpperCamelCase : Optional[Any] = 7.5 , UpperCamelCase : Dict = None , UpperCamelCase : str = 1 , UpperCamelCase : Any = 0.0 , UpperCamelCase : List[Any] = None , UpperCamelCase : List[Any] = None , UpperCamelCase : Dict = "pil" , UpperCamelCase : Union[str, Any] = True , UpperCamelCase : Dict = None , UpperCamelCase : Any = 1 , **UpperCamelCase : Any , ):
'''simple docstring'''
return self.pipe4(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
@torch.no_grad()
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] = 512 , UpperCamelCase : str = 512 , UpperCamelCase : Any = 50 , UpperCamelCase : List[Any] = 7.5 , UpperCamelCase : List[str] = None , UpperCamelCase : Union[str, Any] = 1 , UpperCamelCase : Optional[Any] = 0.0 , UpperCamelCase : Dict = None , UpperCamelCase : str = None , UpperCamelCase : Optional[int] = "pil" , UpperCamelCase : List[Any] = True , UpperCamelCase : Tuple = None , UpperCamelCase : Optional[Any] = 1 , **UpperCamelCase : List[Any] , ):
'''simple docstring'''
__UpperCAmelCase : str = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(lowerCAmelCase__ )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
__UpperCAmelCase : List[Any] = self.text2img_sd1_1(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.2
__UpperCAmelCase : Tuple = self.text2img_sd1_2(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.3
__UpperCAmelCase : Tuple = self.text2img_sd1_3(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
# Get first result from Stable Diffusion Checkpoint v1.4
__UpperCAmelCase : str = self.text2img_sd1_4(
prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
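# Note: a single call to this pipeline therefore runs the same prompt through all
# four v1.x checkpoints and returns the four resulting images in one
# StableDiffusionPipelineOutput.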
| 115 |
"""simple docstring"""
__UpperCamelCase = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''negative_prompt'''])
__UpperCamelCase = frozenset([])
__UpperCamelCase = frozenset(['''image'''])
__UpperCamelCase = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__UpperCamelCase = frozenset(['''image'''])
__UpperCamelCase = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
__UpperCamelCase = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
__UpperCamelCase = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__UpperCamelCase = frozenset(['''image''', '''mask_image'''])
__UpperCamelCase = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__UpperCamelCase = frozenset(['''example_image''', '''image''', '''mask_image'''])
__UpperCamelCase = frozenset(['''class_labels'''])
__UpperCamelCase = frozenset(['''class_labels'''])
__UpperCamelCase = frozenset(['''batch_size'''])
__UpperCamelCase = frozenset([])
__UpperCamelCase = frozenset(['''batch_size'''])
__UpperCamelCase = frozenset([])
__UpperCamelCase = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''negative_prompt'''])
__UpperCamelCase = frozenset(['''input_tokens'''])
__UpperCamelCase = frozenset(['''input_tokens'''])
| 113 | 0 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : str = "bart"
a : str = ["past_key_values"]
a : int = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self, __magic_name__=50265, __magic_name__=1024, __magic_name__=12, __magic_name__=4096, __magic_name__=16, __magic_name__=12, __magic_name__=4096, __magic_name__=16, __magic_name__=0.0, __magic_name__=0.0, __magic_name__="gelu", __magic_name__=1024, __magic_name__=0.1, __magic_name__=0.0, __magic_name__=0.0, __magic_name__=0.02, __magic_name__=0.0, __magic_name__=False, __magic_name__=True, __magic_name__=3, __magic_name__=1, __magic_name__=0, __magic_name__=2, __magic_name__=True, __magic_name__=2, __magic_name__=2, **__magic_name__, ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = vocab_size
UpperCamelCase__ : int = max_position_embeddings
UpperCamelCase__ : int = d_model
UpperCamelCase__ : Dict = encoder_ffn_dim
UpperCamelCase__ : Optional[Any] = encoder_layers
UpperCamelCase__ : Optional[Any] = encoder_attention_heads
UpperCamelCase__ : Union[str, Any] = decoder_ffn_dim
UpperCamelCase__ : Any = decoder_layers
UpperCamelCase__ : str = decoder_attention_heads
UpperCamelCase__ : Optional[int] = dropout
UpperCamelCase__ : Dict = attention_dropout
UpperCamelCase__ : str = activation_dropout
UpperCamelCase__ : int = activation_function
UpperCamelCase__ : List[Any] = init_std
UpperCamelCase__ : Dict = encoder_layerdrop
UpperCamelCase__ : Union[str, Any] = decoder_layerdrop
UpperCamelCase__ : Any = classifier_dropout
UpperCamelCase__ : List[Any] = use_cache
UpperCamelCase__ : List[Any] = encoder_layers
UpperCamelCase__ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__magic_name__, pad_token_id=__magic_name__, bos_token_id=__magic_name__, eos_token_id=__magic_name__, is_encoder_decoder=__magic_name__, decoder_start_token_id=__magic_name__, forced_eos_token_id=__magic_name__, **__magic_name__, )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''', __magic_name__ ):
UpperCamelCase__ : str = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'''The config can simply be saved and uploaded again to be fixed.''' )
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ : str = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCamelCase__ : Optional[Any] = {0: '''batch'''}
UpperCamelCase__ : Optional[Any] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
UpperCamelCase__ : Any = {0: '''batch''', 1: '''decoder_sequence'''}
UpperCamelCase__ : str = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__, direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCamelCase__ : Any = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
UpperCamelCase__ ,UpperCamelCase__ : str = self.num_layers
for i in range(__magic_name__ ):
UpperCamelCase__ : Tuple = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCamelCase__ : Dict = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
UpperCamelCase__ : Union[str, Any] = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
def UpperCamelCase__ ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ : List[str] = super().outputs
else:
UpperCamelCase__ : Optional[int] = super(__magic_name__, self ).outputs
if self.use_past:
UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = self.num_layers
for i in range(__magic_name__ ):
UpperCamelCase__ : Optional[int] = {0: '''batch''', 2: '''past_sequence + sequence'''}
UpperCamelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = -1, __magic_name__ = -1, __magic_name__ = False, __magic_name__ = None, ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ )
# Generate decoder inputs
UpperCamelCase__ : Tuple = seq_length if not self.use_past else 1
UpperCamelCase__ : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ )
UpperCamelCase__ : int = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
UpperCamelCase__ : Optional[Any] = dict(**__magic_name__, **__magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCamelCase__ ,UpperCamelCase__ : Tuple = common_inputs['''input_ids'''].shape
UpperCamelCase__ : Tuple = common_inputs['''decoder_input_ids'''].shape[1]
UpperCamelCase__ ,UpperCamelCase__ : Any = self.num_attention_heads
UpperCamelCase__ : Dict = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase__ : Optional[int] = decoder_seq_length + 3
UpperCamelCase__ : Any = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCamelCase__ : Any = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__magic_name__, __magic_name__ )], dim=1 )
UpperCamelCase__ : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCamelCase__ ,UpperCamelCase__ : Optional[int] = self.num_layers
UpperCamelCase__ : Union[str, Any] = min(__magic_name__, __magic_name__ )
UpperCamelCase__ : List[Any] = max(__magic_name__, __magic_name__ ) - min_num_layers
UpperCamelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__magic_name__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
) )
# TODO: test this.
UpperCamelCase__ : Union[str, Any] = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__magic_name__, __magic_name__ ):
common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
return common_inputs
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = -1, __magic_name__ = -1, __magic_name__ = False, __magic_name__ = None, ) -> Mapping[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
UpperCamelCase__ : Tuple = seqlen + 2
UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = self.num_layers
UpperCamelCase__ ,UpperCamelCase__ : int = self.num_attention_heads
UpperCamelCase__ : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase__ : Any = common_inputs['''attention_mask'''].dtype
UpperCamelCase__ : str = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__magic_name__, __magic_name__, dtype=__magic_name__ )], dim=1 )
UpperCamelCase__ : Tuple = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
]
return common_inputs
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = -1, __magic_name__ = -1, __magic_name__ = False, __magic_name__ = None, ) -> Mapping[str, Any]:
"""simple docstring"""
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCamelCase__ : Dict = compute_effective_axis_dimension(
__magic_name__, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ : Tuple = tokenizer.num_special_tokens_to_add(__magic_name__ )
UpperCamelCase__ : Any = compute_effective_axis_dimension(
__magic_name__, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ : int = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCamelCase__ : Union[str, Any] = dict(tokenizer(__magic_name__, return_tensors=__magic_name__ ) )
return common_inputs
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ = -1, __magic_name__ = -1, __magic_name__ = False, __magic_name__ = None, ) -> Mapping[str, Any]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__, batch_size=__magic_name__, seq_length=__magic_name__, is_pair=__magic_name__, framework=__magic_name__ )
elif self.task == "causal-lm":
UpperCamelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm(
__magic_name__, batch_size=__magic_name__, seq_length=__magic_name__, is_pair=__magic_name__, framework=__magic_name__ )
else:
UpperCamelCase__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__magic_name__, batch_size=__magic_name__, seq_length=__magic_name__, is_pair=__magic_name__, framework=__magic_name__ )
return common_inputs
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__ ) -> List[str]:
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ : int = super()._flatten_past_key_values_(__magic_name__, __magic_name__, __magic_name__, __magic_name__ )
else:
UpperCamelCase__ : int = super(__magic_name__, self )._flatten_past_key_values_(
__magic_name__, __magic_name__, __magic_name__, __magic_name__ )
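# Editor's hedged illustration (not part of the original module): the dummy
# past_key_values built above are plain zero tensors shaped
# (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads),
# one (key, value) pair per layer. A minimal standalone sketch with assumed sizes:
if __name__ == "__main__":
    import torch
    batch, num_heads, hidden_size, past_len = 2, 16, 1024, 11
    head_dim = hidden_size // num_heads
    decoder_past_shape = (batch, num_heads, past_len, head_dim)
    # e.g. 12 decoder layers, each contributing one zeroed (key, value) pair
    dummy_past = [(torch.zeros(decoder_past_shape), torch.zeros(decoder_past_shape)) for _ in range(12)]
    print(len(dummy_past), dummy_past[0][0].shape)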
| 247 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('vit' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
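# Editor's hedged sketch (not in the original script): the slicing above splits timm's
# fused qkv projection, a (3 * hidden_size, hidden_size) matrix, back into separate
# query/key/value weights. `_demo_qkv_split` below is a hypothetical helper that checks
# the slice logic on a toy tensor.
def _demo_qkv_split( hidden_size=4 ):
    fused = torch.arange(3 * hidden_size * hidden_size , dtype=torch.float32 ).reshape(3 * hidden_size , hidden_size )
    q = fused[: hidden_size, :]
    k = fused[hidden_size : hidden_size * 2, :]
    v = fused[-hidden_size :, :]
    assert torch.equal(torch.cat([q, k, v] , dim=0 ) , fused )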
def remove_classification_head_( state_dict ):
    ignore_keys = ['head.weight', 'head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( model_name , pytorch_dump_folder_path , base_model=True ):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = 'huggingface/label-files'
        filename = 'imagenet-1k-id2label.json'
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load('facebookresearch/dino:main' , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
UpperCAmelCase_ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 247 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class a ( _SCREAMING_SNAKE_CASE ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
return config
    def check_loss_output( self , result ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
_a = DebertaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )[0]
_a = model(__magic_name__ , token_type_ids=__magic_name__ )[0]
_a = model(__magic_name__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = DebertaForMaskedLM(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
_a = self.num_labels
_a = DebertaForSequenceClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str:
_a = self.num_labels
_a = DebertaForTokenClassification(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = DebertaForQuestionAnswering(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
_a = model(
__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowerCAmelCase = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCAmelCase = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def __UpperCAmelCase ( self ) -> Tuple:
_a = DebertaModelTester(self )
_a = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )
def __UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ) -> List[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__magic_name__ )
def __UpperCAmelCase ( self ) -> int:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__magic_name__ )
def __UpperCAmelCase ( self ) -> List[str]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__magic_name__ )
def __UpperCAmelCase ( self ) -> List[Any]:
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> Optional[int]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = DebertaModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def __UpperCAmelCase ( self ) -> str:
pass
@slow
def __UpperCAmelCase ( self ) -> List[Any]:
        model = DebertaModel.from_pretrained('microsoft/deberta-base' )
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
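if __name__ == "__main__":
    # Editor's hedged usage sketch mirroring the slow test above (requires torch and
    # network access to download the checkpoint).
    model = DebertaModel.from_pretrained('microsoft/deberta-base' )
    input_ids = torch.tensor([[0, 31414, 232, 2]] )
    with torch.no_grad():
        hidden_states = model(input_ids )[0]
    print(hidden_states.shape )  # (1, 4, 768) for deberta-base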
| 168 |
'''simple docstring'''
import numpy as np
class Cell :
    """A cell in the grid world, with a position, a parent and the costs g, h and f."""
    def __init__( self ):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__( self , cell ):
        return self.position == cell.position
    def showcell( self ):
        print(self.position )
class Gridworld :
    def __init__( self , world_size=(5, 5) ):
        self.w = np.zeros(world_size )
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show( self ):
        print(self.w )
    def get_neigbours( self , cell ):
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c )
        return neighbours
def astar( world , start , goal ):
    """A* search from start to goal on the given grid world."""
    _open = []
    _closed = []
    _open.append(start )
    while _open:
        min_f = np.argmin([n.f for n in _open] )
        current = _open[min_f]
        _closed.append(_open.pop(min_f ) )
        if current == goal:
            break
        for n in world.get_neigbours(current ):
            # skip neighbours that were already expanded
            if any(c == n for c in _closed ):
                continue
            n.g = current.g + 1
            xa , ya = n.position
            xb , yb = goal.position
            # squared Euclidean distance to the goal as the heuristic
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # skip neighbours already queued with a lower cost
            if any(c == n and c.f < n.f for c in _open ):
                continue
            _open.append(n )
    path = []
    while current.parent is not None:
        path.append(current.position )
        current = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f'''path from {start.position} to {goal.position}''')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
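    # Editor's extra sketch (hedged): the same search on a larger world; with diagonal
    # moves allowed, the path from (0, 0) to (9, 9) should contain 10 positions.
    big_world = Gridworld(world_size=(10, 10) )
    far_goal = Cell()
    far_goal.position = (9, 9)
    print(len(astar(big_world , start , far_goal ) ) )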
| 168 | 1 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory( _ ):
    """simple docstring"""
    return EnvironmentCommand()
def download_command_factory( args ):
    """simple docstring"""
    return EnvironmentCommand(args.accelerate_config_file )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser ):
        '''simple docstring'''
        download_parser = parser.add_parser('env' )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            '--accelerate-config_file' , default=None , help='The accelerate config file to use for the default values in the launching script.' , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ):
        '''simple docstring'''
        self._accelerate_config_file = accelerate_config_file
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = 'not installed'
if is_safetensors_available():
import safetensors
UpperCAmelCase_ : Dict = safetensors.__version__
elif importlib.util.find_spec('safetensors' ) is not None:
import safetensors
UpperCAmelCase_ : Optional[Any] = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
UpperCAmelCase_ : Any = 'not installed'
UpperCAmelCase_ : int = 'not found'
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
UpperCAmelCase_ : str = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(snake_case_ ):
UpperCAmelCase_ : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict()
UpperCAmelCase_ : Union[str, Any] = (
'\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(snake_case_ , snake_case_ )
else F'''\t{accelerate_config}'''
)
UpperCAmelCase_ : List[Any] = 'not installed'
UpperCAmelCase_ : Optional[Any] = 'NA'
if is_torch_available():
import torch
UpperCAmelCase_ : List[str] = torch.__version__
UpperCAmelCase_ : Optional[Any] = torch.cuda.is_available()
UpperCAmelCase_ : Union[str, Any] = 'not installed'
UpperCAmelCase_ : List[Any] = 'NA'
if is_tf_available():
import tensorflow as tf
UpperCAmelCase_ : Any = tf.__version__
try:
# deprecated in v2.1
UpperCAmelCase_ : str = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
UpperCAmelCase_ : Optional[Any] = bool(tf.config.list_physical_devices('GPU' ) )
UpperCAmelCase_ : str = 'not installed'
UpperCAmelCase_ : int = 'not installed'
UpperCAmelCase_ : Optional[Any] = 'not installed'
UpperCAmelCase_ : int = 'NA'
if is_flax_available():
import flax
import jax
import jaxlib
UpperCAmelCase_ : Tuple = flax.__version__
UpperCAmelCase_ : Union[str, Any] = jax.__version__
UpperCAmelCase_ : int = jaxlib.__version__
UpperCAmelCase_ : Optional[Any] = jax.lib.xla_bridge.get_backend().platform
UpperCAmelCase_ : Union[str, Any] = {
'`transformers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'Huggingface_hub version': huggingface_hub.__version__,
'Safetensors version': F'''{safetensors_version}''',
'Accelerate version': F'''{accelerate_version}''',
'Accelerate config': F'''{accelerate_config_str}''',
'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''',
'Tensorflow version (GPU?)': F'''{tf_version} ({tf_cuda_available})''',
'Flax version (CPU?/GPU?/TPU?)': F'''{flax_version} ({jax_backend})''',
'Jax version': F'''{jax_version}''',
'JaxLib version': F'''{jaxlib_version}''',
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(snake_case_ ) )
return info
@staticmethod
def _UpperCamelCase ( snake_case_ ):
'''simple docstring'''
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 361 |
'''simple docstring'''
def split( string: str , separator: str = " " ) -> list:
    """Splits the string on every occurrence of the single-character separator."""
    split_words = []
    last_index = 0
    for index, char in enumerate(string ):
        if char == separator:
            split_words.append(string[last_index:index] )
            last_index = index + 1
        elif index + 1 == len(string ):
            split_words.append(string[last_index : index + 1] )
    return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
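    # Editor's example (hedged): the function mirrors str.split for a single-character
    # separator.
    print(split('apple#banana#cherry#orange' , separator='#' ) )  # ['apple', 'banana', 'cherry', 'orange']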
| 274 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
_a : Any = logging.getLogger(__name__)
def main() -> None:
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()
    logger.info(f"Loading Tokenizer ({args.tokenizer_name})" )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f"Loading text from {args.file_path}" )
    with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()
    logger.info("Start encoding" )
    logger.info(f"{len(data)} examples to process." )
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl" )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(f"{len(data)} examples processed." )
    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f"Dump to {dp_file}" )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
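# Editor's hedged helper (not in the original script): the dtype switch in main() keeps
# the pickle small -- 2-byte uint16 token ids whenever the vocabulary fits in 16 bits,
# 4-byte int32 otherwise.
def _smallest_id_dtype( vocab_size ):
    return np.uint16 if vocab_size < (1 << 16) else np.int32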
if __name__ == "__main__":
main()
| 44 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    """Converts a saved state dict to torch.float16 for faster downloads and less disk space."""
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
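# Editor's hedged smoke test (not in the original): round-trips a tiny state dict
# through convert() and checks the tensors come back as float16.
def _demo_convert( ):
    import os
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp , 'tiny.bin' )
        torch.save({'w': torch.randn(2 , 2 )} , path )
        convert(path )  # overwrites in place with half-precision tensors
        assert torch.load(path )['w'].dtype == torch.float16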
if __name__ == "__main__":
fire.Fire(convert)
| 224 | 0 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack( vl , wt , w , n ):
    """Fractional knapsack: sort by value/weight ratio, take whole items greedily and
    a fraction of the first item that no longer fits."""
    r = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
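    # Editor's example (hedged): the classic instance -- values [60, 100, 120],
    # weights [10, 20, 30], capacity 50 and n = 3 items yields 240.0 (all of the
    # first two items plus two thirds of the third).
    print(frac_knapsack([60, 100, 120] , [10, 20, 30] , 50 , 3 ) )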
| 204 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError ( RuntimeError ):
    pass
def gen( shards : List[str] ):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD ):
            yield {"i": i, "shard": shard}
def main() -> None:
    rank = int(os.environ["RANK"] )
    world_size = int(os.environ["WORLD_SIZE"] )
    parser = ArgumentParser()
    parser.add_argument("--streaming" , type=bool )
    parser.add_argument("--local_rank" , type=int )
    parser.add_argument("--num_workers" , type=int , default=0 )
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS )]}
    ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
    if not streaming:
        ds = Dataset.from_list(list(ds ) )
    ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
    dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    local_size = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}" )
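# Editor's hedged mini-check (single process, no launcher needed): for a map-style
# dataset, split_dataset_by_node hands rank r a contiguous share, with the first
# full_size % world_size ranks receiving one extra row.
def _demo_split( ):
    ds = Dataset.from_dict({'i': list(range(10 ) )} )
    sizes = [len(split_dataset_by_node(ds , rank=r , world_size=4 ) ) for r in range(4 )]
    assert sizes == [3, 3, 2, 2]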
if __name__ == "__main__":
main()
| 204 | 1 |
"""simple docstring"""
from __future__ import annotations
class Node :
    def __init__( self , data=None ):
        self.data = data
        self.next = None
    def __repr__( self ):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f'{temp.data}')
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list( elements_list ) -> Node:
    """Creates a Linked List from the elements of the given sequence
    (list/tuple) and returns the head of the Linked List."""
    if not elements_list:
        raise Exception('The Elements List is empty' )
    current = head = Node(elements_list[0] )
    for i in range(1 , len(elements_list ) ):
        current.next = Node(elements_list[i] )
        current = current.next
    return head
def print_reverse( head_node ) -> None:
    """Prints the elements of the given Linked List in reverse order."""
    if head_node is not None and isinstance(head_node , Node ):
        print_reverse(head_node.next )
        print(head_node.data )
def main():
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43] )
    print('Linked List:' )
    print(linked_list )
    print('Elements in Reverse:' )
    print_reverse(linked_list )
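# Editor's hedged variant (not in the original): an iterative version that sidesteps
# Python's recursion limit on very long lists.
def print_reverse_iterative( head_node ):
    items = []
    while head_node:
        items.append(head_node.data )
        head_node = head_node.next
    for data in reversed(items ):
        print(data )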
if __name__ == "__main__":
main()
| 69 |
import unittest
from knapsack import greedy_knapsack as kp
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def test_sorted( self ):
        """calc_profit takes (profit, weight, max_weight) and returns the expected total."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )
    def test_negative_max_weight( self ):
        """simple docstring"""
        self.assertRaisesRegex(ValueError , "max_weight must greater than zero." )
    def test_negative_weight_value( self ):
        """simple docstring"""
        self.assertRaisesRegex(ValueError , "Weight can not be negative." )
    def test_negative_profit_value( self ):
        """simple docstring"""
        self.assertRaisesRegex(ValueError , "Profit can not be negative." )
    def test_null_max_weight( self ):
        """simple docstring"""
        self.assertRaisesRegex(ValueError , "max_weight must greater than zero." )
    def test_unequal_list_length( self ):
        """simple docstring"""
        self.assertRaisesRegex(
            ValueError , "The length of profit and weight must be same." )
if __name__ == "__main__":
unittest.main()
| 159 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool ( PipelineTool ):
"""simple docstring"""
lowerCamelCase = 'facebook/bart-large-mnli'
lowerCamelCase = (
'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '
'should be the text to classify, and `labels`, which should be the list of labels to use for classification. '
'It returns the most likely label in the list of provided `labels` for the input text.'
)
lowerCamelCase = 'text_classifier'
lowerCamelCase = AutoTokenizer
lowerCamelCase = AutoModelForSequenceClassification
lowerCamelCase = ['text', ['text']]
lowerCamelCase = ['text']
    def setup( self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith('entail' ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
    def encode( self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ),[f'This example is {label}' for label in labels],return_tensors='pt',padding='max_length',)
    def decode( self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
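if __name__ == "__main__":
    # Editor's hedged usage sketch: exercising the tool downloads the
    # facebook/bart-large-mnli checkpoint, hence the __main__ guard.
    tool = TextClassificationTool()
    print(tool('This is a super nice API!' , labels=['positive', 'negative'] ) )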
| 282 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[Any],lowercase_ : str )-> List[Any]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'],model_result['ss'] ):
A__ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(lowercase_ )
def snake_case__ ( self : Dict )-> List[str]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Dict )-> List[str]:
'''simple docstring'''
A__ = 'sgugger/tiny-distilbert-classification'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,only_pretrain_model=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,torchscript=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu','Cant do half precision' )
def snake_case__ ( self : Any )-> Dict:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,fpaa=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = AutoConfig.from_pretrained(lowercase_ )
# set architectures equal to `None`
A__ = None
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Union[str, Any] )-> int:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu','Can\'t do half precision' )
def snake_case__ ( self : List[Any] )-> Dict:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],fpaa=lowercase_,multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : int )-> Optional[int]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = 'sshleifer/tinier_bart'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[str] )-> List[str]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : int )-> Union[str, Any]:
'''simple docstring'''
A__ = 'sshleifer/tinier_bart'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,save_to_csv=lowercase_,sequence_lengths=[8],batch_sizes=[1],inference_time_csv_file=os.path.join(lowercase_,'inf_time.csv' ),train_memory_csv_file=os.path.join(lowercase_,'train_mem.csv' ),inference_memory_csv_file=os.path.join(lowercase_,'inf_mem.csv' ),train_time_csv_file=os.path.join(lowercase_,'train_time.csv' ),env_info_csv_file=os.path.join(lowercase_,'env.csv' ),multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase_,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'env.csv' ) ).exists() )
def snake_case__ ( self : Tuple )-> str:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase_ : Optional[Any] ):
self.assertTrue(hasattr(lowercase_,'sequential' ) )
self.assertTrue(hasattr(lowercase_,'cumulative' ) )
self.assertTrue(hasattr(lowercase_,'current' ) )
self.assertTrue(hasattr(lowercase_,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],log_filename=os.path.join(lowercase_,'log.txt' ),log_print=lowercase_,trace_memory_line_by_line=lowercase_,multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase_,'log.txt' ) ).exists() )
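if __name__ == "__main__":
    # Editor's hedged standalone sketch: the same benchmark API the tests above
    # exercise, outside of unittest.
    benchmark_args = PyTorchBenchmarkArguments(
        models=['sshleifer/tiny-gpt2'] , training=False , inference=True ,
        sequence_lengths=[8] , batch_sizes=[1] , multi_process=False , )
    print(PyTorchBenchmark(benchmark_args ).run() )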
| 282 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __UpperCAmelCase :
@staticmethod
        def open( *args , **kwargs ):
            pass
def hashimage( image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]
def mask_to_test_readable( mask ) -> Dict:
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip('''Image segmentation not implemented in TF''' )
def __magic_name__ ( self : int ):
pass
@slow
@require_torch
def __magic_name__ ( self : str ):
        image_segmenter = pipeline('mask-generation', model='facebook/sam-vit-huge' )
        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg', points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4 ), [
            {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
            {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021},
            {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
            {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
            {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
            {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967},
            {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993},
            {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909},
            {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879},
            {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834},
            {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716},
            {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9612},
            {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599},
            {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552},
            {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532},
            {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516},
            {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499},
            {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483},
            {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464},
            {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943},
            {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943},
            {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408},
            {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335},
            {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326},
            {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262},
            {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999},
            {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986},
            {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984},
            {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873},
            {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871}
        ], )
# fmt: on
@require_torch
@slow
def __magic_name__ ( self : List[str] ):
UpperCAmelCase : List[Any] = "facebook/sam-vit-huge"
UpperCAmelCase : Any = pipeline('''mask-generation''', model=_UpperCamelCase )
UpperCAmelCase : List[str] = image_segmenter(
'''http://images.cocodataset.org/val2017/000000039769.jpg''', pred_iou_thresh=1, points_per_batch=2_5_6 )
# Shortening by hashing
UpperCAmelCase : str = []
for i, o in enumerate(outputs['''masks'''] ):
new_outupt += [{"mask": mask_to_test_readable(_UpperCamelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(_UpperCamelCase, decimals=4 ), [
            {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444},
            {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0210},
            {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167},
            {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132},
            {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053},
        ], )
| 336 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' # default region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
        'task_name': 'mnli',
        'per_device_train_batch_size': 16,
        'per_device_eval_batch_size': 16,
        'do_train': True,
        'do_eval': True,
        'do_predict': True,
        'output_dir': '/opt/ml/model',
        'overwrite_output_dir': True,
        'max_steps': 500,
        'save_steps': 5500,
    }
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1000}
@property
    def metric_definitions( self ):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name( self ):
"""simple docstring"""
        return f'''{self.framework}-transformers-test'''
@property
    def test_path( self ):
"""simple docstring"""
return f'''./tests/sagemaker/scripts/{self.framework}'''
@property
    def image_uri( self ):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def _A ( snake_case ) -> Tuple:
_lowercase : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 250 | 0 |
def valid_coloring(neighbours, colored_vertices, color):
    # A vertex may take `color` only if no already-colored neighbour uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours) )


def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
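# A minimal usage sketch (the adjacency matrix below is an assumed example, not
# from the original source): a 4-cycle is bipartite, so two colors suffice.
if __name__ == "__main__":
    example_graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(color(example_graph, 2))  # expected: [0, 1, 0, 1]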
| 343 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
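# A minimal usage sketch (the checkpoint id and image path are assumptions for
# illustration, not from the original source):
# from PIL import Image
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=Image.open("photo.jpg"), text="a photography of", return_tensors="pt")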
| 343 | 1 |
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
UpperCAmelCase_ : Optional[Any] = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
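# A minimal usage sketch (the data_dir path is a placeholder, not from the
# original source): the builder is normally reached through `load_dataset`.
# from datasets import load_dataset
# ds = load_dataset("audiofolder", data_dir="/path/to/folder")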
| 91 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109 | 0 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 365 |
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell can be visited if it is inside the grid, unvisited, and land.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 neighbours of the cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
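# A minimal usage sketch (the grid is an assumed example, not from the original
# source): two diagonally-connected groups of 1s form two islands.
if __name__ == "__main__":
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [0, 0, 1, 1],
    ]
    print(Matrix(4, 4, grid).count_islands())  # expected: 2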
| 116 | 0 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint which are supposed to download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 148 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
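# For instance, solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.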
| 299 | 0 |
"""simple docstring"""
def depth_first_search(grid, row, col, visit):
    """Count the simple paths from (row, col) to the bottom-right cell of `grid`,
    moving in the four cardinal directions, never revisiting a cell and never
    entering a blocked (1) cell."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
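# A minimal usage sketch (the grid is an assumed example, not from the original
# source): with the centre cell blocked, the open cells form a ring, so there
# are exactly two simple paths from the top-left to the bottom-right corner.
if __name__ == "__main__":
    example = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example, 0, 0, set()))  # expected: 2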
| 259 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
    def get_config(self):
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict
    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1, )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device, )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1, )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state, )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ] )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )

            out_len = len(outputs)
            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim], )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim], )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], future_values=batch["future_values"], future_time_features=batch["future_time_features"], )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"], past_time_features=batch["past_time_features"], past_observed_mask=batch["past_observed_mask"], static_categorical_features=batch["static_categorical_features"], ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"], past_time_features=batch["past_time_features"], past_values=batch["past_values"], future_time_features=batch["future_time_features"], past_observed_mask=batch["past_observed_mask"], )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 259 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''')
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(F'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}''')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(F'''Save configuration file to {os.path.abspath(pytorch_config_dump_path)}''')
    with open(pytorch_config_dump_path, """w""", encoding="""utf-8""") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
_lowerCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
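# A hypothetical invocation (script name and paths are placeholders, not from
# the original source):
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet-pytorch \
#     --finetuning_task sst-2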
| 287 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
config_common_kwargs = {
"""return_dict""": False,
"""output_hidden_states""": True,
"""output_attentions""": True,
"""torchscript""": True,
"""torch_dtype""": """float16""",
"""use_bfloat16""": True,
"""tf_legacy_loss""": True,
"""pruned_heads""": {"""a""": 1},
"""tie_word_embeddings""": False,
"""is_decoder""": True,
"""cross_attention_hidden_size""": 1_2_8,
"""add_cross_attention""": True,
"""tie_encoder_decoder""": True,
"""max_length""": 5_0,
"""min_length""": 3,
"""do_sample""": True,
"""early_stopping""": True,
"""num_beams""": 3,
"""num_beam_groups""": 3,
"""diversity_penalty""": 0.5,
"""temperature""": 2.0,
"""top_k""": 1_0,
"""top_p""": 0.7,
"""typical_p""": 0.2,
"""repetition_penalty""": 0.8,
"""length_penalty""": 0.8,
"""no_repeat_ngram_size""": 5,
"""encoder_no_repeat_ngram_size""": 5,
"""bad_words_ids""": [1, 2, 3],
"""num_return_sequences""": 3,
"""chunk_size_feed_forward""": 5,
"""output_scores""": True,
"""return_dict_in_generate""": True,
"""forced_bos_token_id""": 2,
"""forced_eos_token_id""": 3,
"""remove_invalid_values""": True,
"""architectures""": ["""BertModel"""],
"""finetuning_task""": """translation""",
"""id2label""": {0: """label"""},
"""label2id""": {"""label""": """0"""},
"""tokenizer_class""": """BertTokenizerFast""",
"""prefix""": """prefix""",
"""bos_token_id""": 6,
"""pad_token_id""": 7,
"""eos_token_id""": 8,
"""sep_token_id""": 9,
"""decoder_start_token_id""": 1_0,
"""exponential_decay_length_penalty""": (5, 1.01),
"""suppress_tokens""": [0, 1],
"""begin_suppress_tokens""": 2,
"""task_specific_params""": {"""translation""": """some_params"""},
"""problem_type""": """regression""",
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-config""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
        config.push_to_hub("""test-config""", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="""test-config""")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="""test-config""", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(F'''{USER}/test-config''')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
        config.push_to_hub("""valid_org/test-config-org""", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("""valid_org/test-config-org""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="""valid_org/test-config-org""")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="""valid_org/test-config-org""", push_to_hub=True, use_auth_token=self._token )

        new_config = BertConfig.from_pretrained("""valid_org/test-config-org""")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("""test-dynamic-config""", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"""AutoConfig""": """custom_configuration.CustomConfig"""})

        new_config = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''', trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, """CustomConfig""")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + """foo"""  # str
        c.update_from_string(
            F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
        self.assertEqual(n_embd, c.n_embd, """mismatch for key: n_embd""")
        self.assertEqual(resid_pdrop, c.resid_pdrop, """mismatch for key: resid_pdrop""")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, """mismatch for key: scale_attn_weights""")
        self.assertEqual(summary_type, c.summary_type, """mismatch for key: summary_type""")
    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                """The following keys are set with the default values in"""
                """ `test_configuration_common.config_common_kwargs` pick another value for them:"""
                F''' {", ".join(keys_with_defaults)}.''' )
    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""")

        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""", subfolder="""bert""")
        self.assertIsNotNone(config)
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("""requests.Session.request""", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            """https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("""bert-base-cased""")
        configuration.configuration_files = ["""config.4.0.0.json"""]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, """config.4.0.0.json"""), """w"""))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["""config.42.0.0.json"""]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, """config.4.0.0.json"""), os.path.join(tmp_dir, """config.42.0.0.json"""))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)
    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = """hf-internal-testing/test-two-configs"""

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = """v4.0.0"""
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = """v3.0.0"""
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 287 | 1 |
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained('google/byt5-small')

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            else:
                toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'])
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''])
        self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'Unicode €.</s>')

        encoded = tokenizer('e è é ê ë')
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'e è é ê ë</s>')

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), 'e è é ê ë</s>')
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK )
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch['input_ids'][0])
        self.assertEqual(expected_tgt_tokens, batch['labels'][0])
    def test_save_and_load_tokenizer(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir, )
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])), )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, )

                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'], tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])), )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == '')
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowerCAmelCase : List[Any] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__lowerCAmelCase : Any = 0
__lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(
_snake_case , skip_special_tokens=_snake_case )
for attr in attributes_list:
setattr(_snake_case , attr + '_id' , _snake_case )
self.assertEqual(getattr(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(getattr(_snake_case , attr + '_id' ) , _snake_case )
setattr(_snake_case , attr + '_id' , _snake_case )
self.assertEqual(getattr(_snake_case , _snake_case ) , _snake_case )
self.assertEqual(getattr(_snake_case , attr + '_id' ) , _snake_case )
setattr(_snake_case , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(_snake_case , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(_snake_case , 'additional_special_tokens_ids' ) , [] )
setattr(_snake_case , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_snake_case , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_snake_case , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
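    # A minimal sketch (hypothetical path; assumes the `transformers` AddedToken API
    # imported by this test file) of the pattern the tests above exercise: the
    # additional_special_tokens saved to disk can be overridden at load time.
    #
    #   tok = tokenizer_class.from_pretrained(
    #       "path/to/saved/tokenizer",
    #       additional_special_tokens=[AddedToken("a_new_additional_special_token", lstrip=True)],
    #   )
    #   assert "a_new_additional_special_token" in tok.additional_special_tokens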
| 357 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class A__ ( TaskTemplate):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
A_ : str = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True})
A_ : ClassVar[Features] = Features({'text': Value('string')})
A_ : ClassVar[Features] = Features({'labels': ClassLabel})
A_ : str = "text"
A_ : str = "labels"
    def __lowerCamelCase ( self , features ):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features." )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel." )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
def __lowerCamelCase ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 182 | 0 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    '''simple docstring'''
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list[list[list[float] | float]]:
    '''simple docstring'''
    if dataset.ndim != value_array.ndim:
        msg = (
            'Wrong input data\'s dimensions... '
            f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                'Wrong input data\'s shape... '
                f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError('Wrong shape' )
    if dataset.dtype != value_array.dtype:
        msg = (
            'Input data have different datatype... '
            f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    '''simple docstring'''
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
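# A short usage sketch of the helpers above; the arrays are made-up example values.
if __name__ == "__main__":
    example_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    example_queries = np.array([[0.9, 1.1]])
    # nearest vector to [0.9, 1.1] is [1.0, 1.0] at distance sqrt(0.02) ~= 0.1414
    print(similarity_search(example_dataset, example_queries))
    # cos([1, 1], [1, 0]) = 1 / sqrt(2) ~= 0.7071
    print(cosine_similarity(np.array([1.0, 1.0]), np.array([1.0, 0.0])))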
| 7 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class A ( BackboneConfigMixin , PretrainedConfig ):
"""simple docstring"""
lowerCamelCase = 'resnet'
lowerCamelCase = ['basic', 'bottleneck']
    def __init__( self , num_channels=3 , embedding_size=64 , hidden_sizes=[256, 512, 1024, 2048] , depths=[3, 4, 6, 3] , layer_type="bottleneck" , hidden_act="relu" , downsample_in_first_stage=False , out_features=None , out_indices=None , **kwargs , )-> Optional[Any]:
        '''simple docstring'''
        super().__init__(**kwargs )
        if layer_type not in self.layer_types:
            raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ['stem'] + [F'stage{idx}' for idx in range(1, len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
class A ( OnnxConfig ):
"""simple docstring"""
lowerCamelCase = version.parse('1.11' )
@property
def snake_case__ ( self : List[Any] )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def snake_case__ ( self : Any )-> float:
'''simple docstring'''
return 1E-3
| 7 | 1 |
import requests
giphy_api_key = '''YOUR API KEY'''
def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    formatted_query = """+""".join(query.split() )
    url = f"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url).json()["""data"""]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
| 368 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNetaDOutput ( BaseOutput ):
    """simple docstring"""
    sample: torch.FloatTensor
class _UpperCAmelCase ( ModelMixin , ConfigMixin ):
    """simple docstring"""
    @register_to_config
    def __init__( self , sample_size: int = 65536 , sample_rate: Optional[int] = None , in_channels: int = 2 , out_channels: int = 2 , extra_in_channels: int = 0 , time_embedding_type: str = "fourier" , flip_sin_to_cos: bool = True , use_timestep_embedding: bool = False , freq_shift: float = 0.0 , down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type: str = "UNetMidBlock1D" , out_block_type: str = None , block_out_channels: Tuple[int] = (32, 32, 64) , act_fn: str = None , norm_num_groups: int = 8 , layers_per_block: int = 1 , downsample_each_block: bool = False , ) -> Optional[int]:
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8 , set_W_to_weight=False , log=False , flip_sin_to_cos=flip_sin_to_cos )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0] , flip_sin_to_cos=flip_sin_to_cos , downscale_freq_shift=freq_shift )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim , time_embed_dim=time_embed_dim , act_fn=act_fn , out_dim=block_out_channels[0] , )
        self.down_blocks = nn.ModuleList([] )
        self.mid_block = None
        self.up_blocks = nn.ModuleList([] )
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels ) - 1
            down_block = get_down_block(
                down_block_type , num_layers=layers_per_block , in_channels=input_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
            self.down_blocks.append(down_block )
        # mid
        self.mid_block = get_mid_block(
            mid_block_type , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=layers_per_block , add_downsample=downsample_each_block , )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types ):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types ) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels ) - 1
            up_block = get_up_block(
                up_block_type , num_layers=layers_per_block , in_channels=prev_output_channel , out_channels=output_channel , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
            self.up_blocks.append(up_block )
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
        self.out_block = get_out_block(
            out_block_type=out_block_type , num_groups_out=num_groups_out , embed_dim=block_out_channels[0] , out_channels=out_channels , act_fn=act_fn , fc_dim=block_out_channels[-1] // 4 , )
    def forward( self , sample: torch.FloatTensor , timestep: Union[torch.Tensor, float, int] , return_dict: bool = True , ) -> Union[UNetaDOutput, Tuple]:
        timesteps = timestep
        if not torch.is_tensor(timesteps ):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
        elif torch.is_tensor(timesteps ) and len(timesteps.shape ) == 0:
            timesteps = timesteps[None].to(sample.device )
        timestep_embed = self.time_proj(timesteps )
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed )
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample , temb=timestep_embed )
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample , timestep_embed )
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks ):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample , res_hidden_states_tuple=res_samples , temb=timestep_embed )
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample , timestep_embed )
        if not return_dict:
            return (sample,)
        return UNetaDOutput(sample=sample )
| 207 | 0 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598
def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("""Temperature cannot be less than 0 K""" )
    if molar_mass <= 0:
        raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" )
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
    # example
    temperature = 3_0_0
    molar_mass = 2_8
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| 109 |
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1000000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 109 | 1 |
from __future__ import annotations
def peak(lst: list[int]) -> int:
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m] ) == 2:
            m -= 1
        return peak(lst[m:] )
    # decreasing
    else:
        if len(lst[:m] ) == 2:
            m += 1
        return peak(lst[:m] )
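# A short usage sketch of peak() on made-up unimodal inputs.
if __name__ == "__main__":
    assert peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) == 5
    assert peak([1, 10, 9, 8, 7, 6]) == 10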
if __name__ == "__main__":
import doctest
doctest.testmod()
| 368 |
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
    def get_vision_text_model( self , vision_config , text_config):
        '''simple docstring'''
        pass
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pass
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        pass
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs):
        '''simple docstring'''
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config)
        model = TFVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], config.projection_dim))
    def check_vision_text_dual_encoder_model( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim))
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        model_kwargs = {'''vision_model''': vision_model, '''text_model''': text_model}
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**model_kwargs)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        self.assertEqual(output['''text_embeds'''].shape , (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['''image_embeds'''].shape , (pixel_values.shape[0], model.config.projection_dim))
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model)
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
        out_1 = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = TFVisionTextDualEncoderModel.from_pretrained(tmpdirname)
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask)
            out_2 = after_output[0].numpy()
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff , 1E-5)
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model)
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions) , vision_config.num_hidden_layers)
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions) , text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def assert_almost_equals( self , a: np.ndarray , b: np.ndarray , tol: float):
        '''simple docstring'''
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff , tol , F'Difference between torch and flax is {diff} (>= {tol}).')
    def test_vision_text_dual_encoder_model( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**inputs_dict)
    def test_model_from_pretrained_configs( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)
    def test_vision_text_dual_encoder_from_pretrained( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)
    def test_save_load( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)
    def test_vision_text_output_attention( self ):
        '''simple docstring'''
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)
    @slow
    def test_real_model_save_load_from_pretrained( self ):
        '''simple docstring'''
        model_2, inputs = self.get_pretrained_model_and_inputs()
        outputs = model_2(**inputs)
        out_2 = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = TFVisionTextDualEncoderModel.from_pretrained(tmp_dirname)
            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0].numpy()
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff , 1E-5)
@require_tf
class _A ( TFVisionTextDualEncoderMixin ,unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''' , '''hf-internal-testing/tiny-random-bert''')
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config):
        '''simple docstring'''
        vision_model = TFViTModel(vision_config , name='''vision_model''')
        text_model = TFBertModel(text_config , name='''text_model''')
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _A ( TFVisionTextDualEncoderMixin ,unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''Rocketknight1/tiny-random-deit-tf''' , '''hf-internal-testing/tiny-random-roberta''')
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs):
        '''simple docstring'''
        vision_model, text_model = self.get_vision_text_model(vision_config , text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model , text_model=text_model)
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True)
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions) , vision_config.num_hidden_layers)
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions) , text_config.num_hidden_layers)
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def get_vision_text_model( self , vision_config , text_config):
        '''simple docstring'''
        vision_model = TFDeiTModel(vision_config , name='''vision_model''')
        text_model = TFRobertaModel(text_config , name='''text_model''')
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        vit_model_tester = TFDeiTModelTester(self)
        bert_model_tester = TFRobertaModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values, _ = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _A ( TFVisionTextDualEncoderMixin ,unittest.TestCase ):
    def get_pretrained_model_and_inputs( self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''Rocketknight1/tiny-random-clip-tf''' , '''hf-internal-testing/tiny-random-bert''')
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ])
        input_ids = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config):
        '''simple docstring'''
        vision_model = TFCLIPVisionModel(vision_config , name='''vision_model''')
        text_model = TFBertModel(text_config , name='''text_model''')
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _A ( unittest.TestCase ):
@slow
    def test_inference( self ):
        '''simple docstring'''
        model = TFVisionTextDualEncoderModel.from_pretrained(
            '''clip-italian/clip-italian''' , logit_scale_init_value=1.0 , from_pt=True)
        processor = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''')
        image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        inputs = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''] , images=image , padding=True , return_tensors='''np''')
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2_28_47_27, 0.3_10_41_22]])
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , expected_logits , atol=1E-3))
| 131 | 0 |
'''simple docstring'''
import os
from math import log10
def solution(data_file: str = "base_exp.txt") -> int:
    '''simple docstring'''
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__) , data_file ) ) ):
        a, x = list(map(int , line.split("," ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
| 1 |
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = '''https://zenquotes.io/api'''
def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
| 181 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
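# A minimal sketch of what the _LazyModule registration above buys: importing the
# package is cheap, and heavy submodules are materialized only on first attribute
# access (the attribute names mirror the _import_structure keys; torch-backed
# classes still require torch at access time).
#
#   from transformers.models import squeezebert
#   config = squeezebert.SqueezeBertConfig()      # triggers configuration_squeezebert import
#   model = squeezebert.SqueezeBertModel(config)  # triggers modeling_squeezebert import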
| 370 |
'''simple docstring'''
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """simple docstring"""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt(message: str) -> str:
    """simple docstring"""
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main() -> None:
    """simple docstring"""
    message = 'Morse code here!'
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
if __name__ == "__main__":
main()
| 5 | 0 |
"""simple docstring"""
def is_palindrome(head) -> bool:
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head) -> bool:
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head) -> bool:
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
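# A short usage sketch: the functions above assume list nodes exposing `.val` and
# `.next`; the tiny Node class here exists only for this demo.
if __name__ == "__main__":
    class Node:
        def __init__(self, val):
            self.val = val
            self.next = None

    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(1)
    assert is_palindrome(head) is True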
| 242 |
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
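# A short usage sketch of the recursive bubble sort above (the list is a made-up example).
if __name__ == "__main__":
    print(bubble_sort([0, 5, 2, 3, 2]))  # [0, 2, 2, 3, 5]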
| 242 | 1 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
def convert_classification(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name , config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name: str, hf_config, downstream_dict: dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name , config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name: str, config_path: str, checkpoint_path: str, model_dump_path: str):
    checkpoint = torch.load(checkpoint_path , map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict)
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''')
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
    )
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
    parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
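# A hypothetical invocation of the converter above (the script name, paths, and
# checkpoint are placeholders, not verified artifacts):
#
#   python convert_unispeech_sat_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model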
| 333 |
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1 , 0 , -1):
        swapped = False
        for j in range(i , 0 , -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
print(f"{cocktail_shaker_sort(unsorted) = }")
| 333 | 1 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class _lowerCAmelCase ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , ) -> None:
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict( self ) -> Optional[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
[-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class _lowerCAmelCase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
_lowercase =ImageGPTImageProcessor if is_vision_available() else None
    def setUp( self ) -> List[str]:
        self.image_processor_tester = ImageGPTImageProcessingTester(self )
    @property
    def image_processor_dict( self ) -> str:
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ) -> Optional[int]:
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "clusters" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
    def test_image_processor_from_dict_with_kwargs( self ) -> Tuple:
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
    def test_to_json_string( self ) -> Optional[Any]:
        image_processor = self.image_processing_class(**self.image_processor_dict )
        obj = json.loads(image_processor.to_json_string() )
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , obj[key] ) )
            else:
                self.assertEqual(obj[key] , value )
    def test_to_json_file( self ) -> List[Any]:
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , "image_processor.json" )
            image_processor_first.to_json_file(json_file_path )
            image_processor_second = self.image_processing_class.from_json_file(json_file_path ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
    def test_image_processor_from_and_save_pretrained( self ) -> Optional[Any]:
        image_processor_first = self.image_processing_class(**self.image_processor_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname )
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname ).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value , image_processor_second[key] ) )
            else:
                self.assertEqual(image_processor_first[key] , value )
@unittest.skip("ImageGPT requires clusters at initialization" )
def __a ( self ) -> List[Any]:
pass
def prepare_images():
    """simple docstring"""
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
    image1 = Image.open(dataset[4]["file"] )
    image2 = Image.open(dataset[5]["file"] )
    images = [image1, image2]
    return images
@require_vision
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
@slow
    def test_image( self ) -> Dict:
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0] , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (1, 1_024) )
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist() , expected_ids )
        # test batched
        encoding = image_processing(images , return_tensors="pt" )
        self.assertIsInstance(encoding.input_ids , torch.LongTensor )
        self.assertEqual(encoding.input_ids.shape , (2, 1_024) )
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist() , expected_ids )
| 231 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 148 | 0 |
def solution(limit: int = 1000000 ) -> int:
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
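# A worked check of the sieve above: phi(2..8) = 1, 2, 2, 4, 2, 6, 4, which sums
# to 21, so solution(8) == 21 -- the classic "21 reduced proper fractions for
# d <= 8" example from Project Euler 72.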
if __name__ == "__main__":
print(f'''{solution() = }''')
| 353 |
def hamming(n_element: int) -> list:
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError("a should be a positive number" )
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
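# A worked check of the three-pointer merge above: the series starts
# 1, 2, 3, 4, 5, 6, 8, 9 (7 is skipped because it has a prime factor other than
# 2, 3, 5), so hamming(8) == [1, 2, 3, 4, 5, 6, 8, 9].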
if __name__ == "__main__":
    n = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    hamming_numbers = hamming(int(n))
print('-----------------------------------------------------')
print(f'''The list with nth numbers is: {hamming_numbers}''')
print('-----------------------------------------------------')
| 328 | 0 |
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path ,tgt_path ,save_path=None ,**kwargs ) -> Dict:
    pred_lns = [x.strip() for x in open(pred_path ).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path ).readlines()][: len(pred_lns )]
    metrics = calculate_rouge(pred_lns ,tgt_lns ,**kwargs )
    if save_path is not None:
        save_json(metrics ,save_path ,indent=None )
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
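# A hypothetical invocation of the Fire CLI above (the script and file names are
# placeholders):
#
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json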
| 208 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( SchedulerCommonTest ):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
            """num_train_timesteps""": 2_0_1,
            """sigma_min""": 0.002,
            """sigma_max""": 80.0,
        }
        config.update(**kwargs)
        return config
    def test_step_shape( self ):
        """simple docstring"""
        num_inference_steps = 1_0
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample).prev_sample
        self.assertEqual(output_0.shape , sample.shape)
        self.assertEqual(output_0.shape , output_1.shape)
    def test_timesteps( self ):
        """simple docstring"""
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_clip_denoised( self ):
        """simple docstring"""
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)
    def test_full_loop_no_noise_onestep( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t)
            # 2. predict noise residual
            residual = model(scaled_sample , t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7_614) < 1E-2
        assert abs(result_mean.item() - 0.2_510) < 1E-3
    def test_full_loop_with_timesteps( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [1_0_6, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t)
            # 2. predict noise residual
            residual = model(scaled_sample , t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6_357) < 1E-2
        assert abs(result_mean.item() - 0.4_527) < 1E-3
    def test_custom_timesteps_increasing_order( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [3_9, 3_0, 1_2, 1_5, 0]
        with self.assertRaises(ValueError , msg="""`timesteps` must be in descending order."""):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [3_9, 3_0, 1_2, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError , msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps)
    def test_custom_timesteps_too_large( self ):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=timesteps)
| 136 | 0 |
"""simple docstring"""
B64_CHARSET = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data , bytes ):
        msg = f'a bytes-like object is required, not \'{data.__class__.__name__}\''
        raise TypeError(msg )
    binary_stream = ''.join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += '0' * (6 - len(binary_stream ) % 6)
    else:
        padding = b''
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f'not \'{encoded_data.__class__.__name__}\''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode('utf-8' )
        except UnicodeDecodeError:
            raise ValueError('base64 encoded data should only contain ASCII characters' )
    padding = encoded_data.count('=' )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(data )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
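

# Editor's usage sketch (not part of the original module): a round-trip check of
# the two functions above against the standard library. The sample payload is
# arbitrary; any bytes object should satisfy both assertions.
if __name__ == "__main__":
    import base64 as stdlib_base64

    payload = b"hello, base64!"
    assert base64_encode(payload) == stdlib_base64.b64encode(payload)
    assert base64_decode(base64_encode(payload)) == payload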
| 366 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 74 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''yjernite/retribert-base-uncased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True,
        strip_accents=None, **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
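

# Editor's note (illustrative, with hypothetical ids): the two helpers above
# reproduce the BERT-style sequence-pair layout. For token_ids_0 = [5, 6] and
# token_ids_1 = [7], build_inputs_with_special_tokens yields
#     [CLS] 5 6 [SEP] 7 [SEP]
# and create_token_type_ids_from_sequences yields the matching segment ids
#     0 0 0 0 1 1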
| 214 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6,
        n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu",
        initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
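

# Editor's sketch (illustrative): `attribute_map` aliases the generic config
# names onto DistilBERT's native ones, so both spellings read the same value.
#
#   config = DistilBertConfig(dim=768, n_heads=12)
#   assert config.hidden_size == config.dim == 768
#   assert config.num_attention_heads == config.n_heads == 12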
| 214 | 1 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path, record_checksum=True) -> dict:
    """Compute the file size and, optionally, the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
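

# Editor's usage sketch (not part of the original module; the URL and hashes
# are made up): `verify_checksums` compares per-URL metadata dicts and raises
# NonMatchingChecksumError when a recorded checksum disagrees with the
# expected one.
#
#   expected = {"https://example.com/f.txt": {"num_bytes": 3, "checksum": "abc"}}
#   recorded = {"https://example.com/f.txt": {"num_bytes": 3, "checksum": "xyz"}}
#   verify_checksums(expected, recorded)  # raises NonMatchingChecksumError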
| 34 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
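

# Editor's note (illustrative): `get_new_h_w` maps an image size to the MoVQ
# latent grid size (1/scale_factor of the image), rounding up so the grid
# always covers the full image. With the default scale_factor of 8:
#   get_new_h_w(512, 512, 8) == (64, 64)
#   get_new_h_w(500, 500, 8) == (64, 64)   # non-multiples round up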
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", truncation=True, max_length=77,
            return_attention_mask=True, add_special_tokens=True, return_tensors="pt",
        )

        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )

        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=77, truncation=True,
                return_attention_mask=True, add_special_tokens=True, return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)

            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self, prompt, image_embeds, negative_image_embeds, negative_prompt=None, height=512, width=512,
        num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None,
        output_type="pil", return_dict=True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 34 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 39 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None,
        max_length=None, max_patches=2048, stride=0, pad_to_multiple_of=None, return_attention_mask=None,
        return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
        return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
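

# Editor's usage sketch (illustrative; the checkpoint id and output keys are
# assumptions, not verified here): images become flattened patches while text
# is tokenized into decoder inputs.
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=image, text="a caption", return_tensors="pt")
#   # -> "flattened_patches" (+ attention mask) from the image processor and
#   #    "decoder_input_ids" / "decoder_attention_mask" renamed from the
#   #    tokenizer output, as done in __call__ above.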
| 39 | 1 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
("albert", "FlaxAlbertModel"),
("bart", "FlaxBartModel"),
("beit", "FlaxBeitModel"),
("bert", "FlaxBertModel"),
("big_bird", "FlaxBigBirdModel"),
("blenderbot", "FlaxBlenderbotModel"),
("blenderbot-small", "FlaxBlenderbotSmallModel"),
("clip", "FlaxCLIPModel"),
("distilbert", "FlaxDistilBertModel"),
("electra", "FlaxElectraModel"),
("gpt-sw3", "FlaxGPT2Model"),
("gpt2", "FlaxGPT2Model"),
("gpt_neo", "FlaxGPTNeoModel"),
("gptj", "FlaxGPTJModel"),
("longt5", "FlaxLongT5Model"),
("marian", "FlaxMarianModel"),
("mbart", "FlaxMBartModel"),
("mt5", "FlaxMT5Model"),
("opt", "FlaxOPTModel"),
("pegasus", "FlaxPegasusModel"),
("regnet", "FlaxRegNetModel"),
("resnet", "FlaxResNetModel"),
("roberta", "FlaxRobertaModel"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
("roformer", "FlaxRoFormerModel"),
("t5", "FlaxT5Model"),
("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
("vit", "FlaxViTModel"),
("wav2vec2", "FlaxWav2Vec2Model"),
("whisper", "FlaxWhisperModel"),
("xglm", "FlaxXGLMModel"),
("xlm-roberta", "FlaxXLMRobertaModel"),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
("albert", "FlaxAlbertForPreTraining"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForPreTraining"),
("big_bird", "FlaxBigBirdForPreTraining"),
("electra", "FlaxElectraForPreTraining"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("t5", "FlaxT5ForConditionalGeneration"),
("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
("whisper", "FlaxWhisperForConditionalGeneration"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
("albert", "FlaxAlbertForMaskedLM"),
("bart", "FlaxBartForConditionalGeneration"),
("bert", "FlaxBertForMaskedLM"),
("big_bird", "FlaxBigBirdForMaskedLM"),
("distilbert", "FlaxDistilBertForMaskedLM"),
("electra", "FlaxElectraForMaskedLM"),
("mbart", "FlaxMBartForConditionalGeneration"),
("roberta", "FlaxRobertaForMaskedLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
("roformer", "FlaxRoFormerForMaskedLM"),
("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("bart", "FlaxBartForConditionalGeneration"),
("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
("encoder-decoder", "FlaxEncoderDecoderModel"),
("longt5", "FlaxLongT5ForConditionalGeneration"),
("marian", "FlaxMarianMTModel"),
("mbart", "FlaxMBartForConditionalGeneration"),
("mt5", "FlaxMT5ForConditionalGeneration"),
("pegasus", "FlaxPegasusForConditionalGeneration"),
("t5", "FlaxT5ForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
("beit", "FlaxBeitForImageClassification"),
("regnet", "FlaxRegNetForImageClassification"),
("resnet", "FlaxResNetForImageClassification"),
("vit", "FlaxViTForImageClassification"),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
("bart", "FlaxBartForCausalLM"),
("bert", "FlaxBertForCausalLM"),
("big_bird", "FlaxBigBirdForCausalLM"),
("electra", "FlaxElectraForCausalLM"),
("gpt-sw3", "FlaxGPT2LMHeadModel"),
("gpt2", "FlaxGPT2LMHeadModel"),
("gpt_neo", "FlaxGPTNeoForCausalLM"),
("gptj", "FlaxGPTJForCausalLM"),
("opt", "FlaxOPTForCausalLM"),
("roberta", "FlaxRobertaForCausalLM"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
("xglm", "FlaxXGLMForCausalLM"),
("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
("albert", "FlaxAlbertForSequenceClassification"),
("bart", "FlaxBartForSequenceClassification"),
("bert", "FlaxBertForSequenceClassification"),
("big_bird", "FlaxBigBirdForSequenceClassification"),
("distilbert", "FlaxDistilBertForSequenceClassification"),
("electra", "FlaxElectraForSequenceClassification"),
("mbart", "FlaxMBartForSequenceClassification"),
("roberta", "FlaxRobertaForSequenceClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
("roformer", "FlaxRoFormerForSequenceClassification"),
("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
("albert", "FlaxAlbertForQuestionAnswering"),
("bart", "FlaxBartForQuestionAnswering"),
("bert", "FlaxBertForQuestionAnswering"),
("big_bird", "FlaxBigBirdForQuestionAnswering"),
("distilbert", "FlaxDistilBertForQuestionAnswering"),
("electra", "FlaxElectraForQuestionAnswering"),
("mbart", "FlaxMBartForQuestionAnswering"),
("roberta", "FlaxRobertaForQuestionAnswering"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
("roformer", "FlaxRoFormerForQuestionAnswering"),
("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
("albert", "FlaxAlbertForTokenClassification"),
("bert", "FlaxBertForTokenClassification"),
("big_bird", "FlaxBigBirdForTokenClassification"),
("distilbert", "FlaxDistilBertForTokenClassification"),
("electra", "FlaxElectraForTokenClassification"),
("roberta", "FlaxRobertaForTokenClassification"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
("roformer", "FlaxRoFormerForTokenClassification"),
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
("albert", "FlaxAlbertForMultipleChoice"),
("bert", "FlaxBertForMultipleChoice"),
("big_bird", "FlaxBigBirdForMultipleChoice"),
("distilbert", "FlaxDistilBertForMultipleChoice"),
("electra", "FlaxElectraForMultipleChoice"),
("roberta", "FlaxRobertaForMultipleChoice"),
("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
("roformer", "FlaxRoFormerForMultipleChoice"),
("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
("bert", "FlaxBertForNextSentencePrediction"),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
("whisper", "FlaxWhisperForConditionalGeneration"),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
("whisper", "FlaxWhisperForAudioClassification"),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
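

# Editor's usage sketch (illustrative): each auto class above resolves a
# checkpoint's config type through its _LazyAutoMapping to the concrete Flax
# model class, e.g.
#
#   from transformers import FlaxAutoModel
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")  # -> FlaxBertModel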
| 354 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-12, patch_size=16, qkv_bias=True, frequency_stride=10, time_stride=10,
        max_length=1024, num_mel_bins=128, **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
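

# Editor's note (illustrative arithmetic, not part of the original file): with
# the defaults above, a (num_mel_bins x max_length) spectrogram is cut into
# 16x16 patches taken every (frequency_stride, time_stride) bins/frames:
#   f_dim = (128 - 16) // 10 + 1 = 12
#   t_dim = (1024 - 16) // 10 + 1 = 101
# i.e. 12 * 101 = 1212 patches per example.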
| 343 | 0 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
A ='\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n'
A ='\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n'
A ='\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        references: List[List[List[str]]],
        predictions: List[List[str]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
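

# Editor's worked example (hand-checked, illustrative): GLEU is the minimum of
# n-gram precision and recall, as described above. For
# hyp = ["the", "cat", "sat"] and ref = ["the", "cat", "sat", "down"] with
# n-grams of length 1..4: hyp has 3 + 2 + 1 + 0 = 6 n-grams, all present in
# ref; ref has 4 + 3 + 2 + 1 = 10. So precision = 6/6 = 1.0,
# recall = 6/10 = 0.6, and GLEU = min(1.0, 0.6) = 0.6.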
| 34 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 58 | 0 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to the burst times.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []

    completed = 0
    total_time = 0

    # While processes are not completed, a process whose arrival time has
    # passed and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
    print("[TEST CASE 01]")

    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
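
    # Editor's hand-checked trace for the test case above: with all arrivals at
    # 0 the shortest job runs first -- P1(2), P3(3), P2(5), P4(7) -- giving
    # waiting times [0, 5, 2, 10] and turnaround times [2, 10, 5, 17], so the
    # printed averages are 4.25000 and 8.50000.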
| 301 |
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1 (Project Euler 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(10) = }")
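
# Editor's note (illustrative): three-argument pow performs modular
# exponentiation, so the roughly 2.4-million-digit power is never materialised:
#   pow(2, 7830457, 10**10) == (2**7830457) % 10**10
# A small sanity check: pow(2, 20, 1000) == 576 == 1048576 % 1000.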
| 301 | 1 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [
                t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))
            ]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-pipe""" ,safety_checker=__snake_case )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 4
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(__snake_case )
SCREAMING_SNAKE_CASE = jax.random.split(__snake_case ,__snake_case )
SCREAMING_SNAKE_CASE = shard(__snake_case )
SCREAMING_SNAKE_CASE = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.1514745 ) < 1e-3
assert np.abs(np.abs(__snake_case ,dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
SCREAMING_SNAKE_CASE = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__snake_case ) == num_samples
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""flax""" ,safety_checker=__snake_case )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(__snake_case )
SCREAMING_SNAKE_CASE = jax.random.split(__snake_case ,__snake_case )
SCREAMING_SNAKE_CASE = shard(__snake_case )
SCREAMING_SNAKE_CASE = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.05652401) ) < 1e-3
assert np.abs((np.abs(__snake_case ,dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa ,safety_checker=__snake_case )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(__snake_case )
SCREAMING_SNAKE_CASE = jax.random.split(__snake_case ,__snake_case )
SCREAMING_SNAKE_CASE = shard(__snake_case )
SCREAMING_SNAKE_CASE = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(__snake_case ,dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa )
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(__snake_case )
SCREAMING_SNAKE_CASE = jax.random.split(__snake_case ,__snake_case )
SCREAMING_SNAKE_CASE = shard(__snake_case )
SCREAMING_SNAKE_CASE = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.04003906) ) < 1e-3
assert np.abs((np.abs(__snake_case ,dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def SCREAMING_SNAKE_CASE__ ( self : str ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = FlaxDDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,set_alpha_to_one=__snake_case ,steps_offset=1 ,)
SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa ,scheduler=__snake_case ,safety_checker=__snake_case ,)
SCREAMING_SNAKE_CASE = scheduler.create_state()
SCREAMING_SNAKE_CASE = scheduler_state
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE = 50
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__snake_case )
# shard inputs and rng
SCREAMING_SNAKE_CASE = replicate(__snake_case )
SCREAMING_SNAKE_CASE = jax.random.split(__snake_case ,__snake_case )
SCREAMING_SNAKE_CASE = shard(__snake_case )
SCREAMING_SNAKE_CASE = pipeline(__snake_case ,__snake_case ,__snake_case ,__snake_case ,jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.045043945) ) < 1e-3
assert np.abs((np.abs(__snake_case ,dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
SCREAMING_SNAKE_CASE = jax.device_count()
SCREAMING_SNAKE_CASE = num_samples * [prompt]
SCREAMING_SNAKE_CASE = jax.random.split(jax.random.PRNGKey(0 ) ,__snake_case )
SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa ,safety_checker=__snake_case ,)
SCREAMING_SNAKE_CASE = replicate(__snake_case )
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__snake_case )
SCREAMING_SNAKE_CASE = shard(__snake_case )
SCREAMING_SNAKE_CASE = pipeline(__snake_case ,__snake_case ,__snake_case ,jit=__snake_case ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" ,revision="""bf16""" ,dtype=jnp.bfloataa ,safety_checker=__snake_case ,use_memory_efficient_attention=__snake_case ,)
SCREAMING_SNAKE_CASE = replicate(__snake_case )
SCREAMING_SNAKE_CASE = pipeline.prepare_inputs(__snake_case )
SCREAMING_SNAKE_CASE = shard(__snake_case )
SCREAMING_SNAKE_CASE = pipeline(__snake_case ,__snake_case ,__snake_case ,jit=__snake_case ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
SCREAMING_SNAKE_CASE = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 296 |
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """A wrapper around two `Transformer2DModel`s whose outputs are blended at inference time."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states, encoder_hidden_states=condition_state, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0]
            # keep only the residual each transformer adds on top of the input
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        # blend the two residuals, then re-add the input as a residual connection
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
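

# Shape sketch (illustrative): with the defaults above, `encoder_hidden_states`
# concatenates a 77-token condition and a 257-token condition along dim 1, so a
# batch of 2 would pass a (2, 334, num_features) tensor; the slicing in forward()
# routes tokens [0:77] to transformers[1] and tokens [77:334] to transformers[0].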
| 297 | 0 |
class Graph:
    def __init__(self) -> None:
        # adjacency lists, keyed by source vertex
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the source vertex is already present,
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
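

# For contrast, an iterative depth-first traversal over the same adjacency-dict
# representation, using an explicit stack instead of recursion (illustrative
# addition, not part of the original class):
def dfs_iterative(graph: Graph, start_vertex: int) -> list:
    visited = set()
    order = []
    stack = [start_vertex]
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        # push neighbours in reverse so they are visited in insertion order
        stack.extend(reversed(graph.vertex.get(vertex, [])))
    return order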
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 214 |
import argparse
import collections

import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging

logging.set_verbosity_info()
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]

    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
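

# Note (illustrative): v1.1 "gated GeLU" checkpoints store two input kernels,
# wi_0 (the gated path) and wi_1 (the linear path), so t5x_mlp_lookup returns
# wi as a tuple; v1.0 checkpoints store a single wi kernel. Callers below
# branch on split_mlp_wi accordingly.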
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_t5x_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Converts the parameters from a T5X-Flax checkpoint to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 214 | 1 |
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
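

if __name__ == "__main__":
    # Minimal example satisfying the FilterType protocol (an illustrative
    # addition, not part of the original module): a first-order IIR low-pass,
    # y[n] = alpha * x[n] + (1 - alpha) * y[n - 1].
    class SimpleLowPass:
        def __init__(self, alpha: float) -> None:
            self.alpha = alpha
            self.prev_output = 0.0

        def process(self, sample: float) -> float:
            self.prev_output = self.alpha * sample + (1 - self.alpha) * self.prev_output
            return self.prev_output

    show_frequency_response(SimpleLowPass(alpha=0.1), samplerate=48000)
    show_phase_response(SimpleLowPass(alpha=0.1), samplerate=48000)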
| 13 |
"""
Implementation of gaussian filter algorithm
"""
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size: int, sigma: float):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
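

# Illustrative: with k_size = 3 and sigma = 1 the formula above yields (rounded)
#     [[0.0585, 0.0965, 0.0585],
#      [0.0965, 0.1592, 0.0965],
#      [0.0585, 0.0965, 0.0585]]
# Note the kernel is not normalised (its entries sum to ~0.78), so the filtered
# image comes out slightly darkened; dividing by the kernel's sum would
# preserve overall brightness.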
def gaussian_filter(image, k_size: int, sigma: float):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 190 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = "ssube/stable-diffusion-x4-upscaler-onnx"
def UpperCamelCase_ ( self: str, a_: Optional[int]=0 ):
'''simple docstring'''
_snake_case : str = floats_tensor((1, 3, 128, 128), rng=random.Random(a_ ) )
_snake_case : int = torch.manual_seed(a_ )
_snake_case : Optional[int] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : int = self.get_dummy_inputs()
_snake_case : List[str] = pipe(**a_ ).images
_snake_case : str = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
_snake_case : Tuple = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
_snake_case : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=a_ )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Optional[int] = self.get_dummy_inputs()
_snake_case : List[str] = pipe(**a_ ).images
_snake_case : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : int = np.array(
[0.6_898_892, 0.59_240_556, 0.52_499_527, 0.58_866_215, 0.52_258_235, 0.52_572_715, 0.62_414_473, 0.6_174_387, 0.6_214_964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
_snake_case : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : List[Any] = self.get_dummy_inputs()
_snake_case : Dict = pipe(**a_ ).images
_snake_case : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : int = np.array(
[0.7_659_278, 0.76_437_664, 0.75_579_107, 0.7_691_116, 0.77_666_986, 0.7_727_672, 0.7_758_664, 0.7_812_226, 0.76_942_515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
_snake_case : List[str] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Union[str, Any] = self.get_dummy_inputs()
_snake_case : Union[str, Any] = pipe(**a_ ).images
_snake_case : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : str = np.array(
[0.6_974_782, 0.68_902_093, 0.70_135_885, 0.7_583_618, 0.7_804_545, 0.7_854_912, 0.78_667_426, 0.78_743_863, 0.78_070_223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="""CPUExecutionProvider""" )
_snake_case : Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Any = self.get_dummy_inputs()
_snake_case : Union[str, Any] = pipe(**a_ ).images
_snake_case : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_snake_case : List[str] = np.array(
[0.77_424_496, 0.773_601, 0.7_645_288, 0.7_769_598, 0.7_772_739, 0.7_738_688, 0.78_187_233, 0.77_879_584, 0.767_043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Optional[Any] = ort.SessionOptions()
_snake_case : int = False
return options
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
_snake_case : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_snake_case : List[str] = init_image.resize((128, 128) )
# using the PNDM scheduler by default
_snake_case : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""", provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Tuple = """A fantasy landscape, trending on artstation"""
_snake_case : str = torch.manual_seed(0 )
_snake_case : Optional[int] = pipe(
prompt=a_, image=a_, guidance_scale=7.5, num_inference_steps=10, generator=a_, output_type="""np""", )
_snake_case : Dict = output.images
_snake_case : Any = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_snake_case : List[Any] = np.array([0.4_883, 0.4_947, 0.4_980, 0.4_975, 0.4_982, 0.4_980, 0.5_000, 0.5_006, 0.4_972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_snake_case : List[Any] = init_image.resize((128, 128) )
_snake_case : List[Any] = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""", subfolder="""scheduler""" )
_snake_case : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""", scheduler=a_, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=a_ )
_snake_case : Union[str, Any] = """A fantasy landscape, trending on artstation"""
_snake_case : List[str] = torch.manual_seed(0 )
_snake_case : str = pipe(
prompt=a_, image=a_, guidance_scale=7.5, num_inference_steps=20, generator=a_, output_type="""np""", )
_snake_case : Union[str, Any] = output.images
_snake_case : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_snake_case : Dict = np.array(
[0.50_173_753, 0.50_223_356, 0.502_039, 0.50_233_036, 0.5_023_725, 0.5_022_601, 0.5_018_758, 0.50_234_085, 0.50_241_566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 371 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase( __a ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = 42
class lowercase( nn.Module ):
'''simple docstring'''
lowercase__ = 42
lowercase__ = (16, 32, 96, 2_56)
lowercase__ = jnp.floataa
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = nn.Conv(
self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
_snake_case : int = []
for i in range(len(self.block_out_channels ) - 1 ):
_snake_case : int = self.block_out_channels[i]
_snake_case : Tuple = self.block_out_channels[i + 1]
_snake_case : Dict = nn.Conv(
a_, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(a_ )
_snake_case : List[Any] = nn.Conv(
a_, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(a_ )
_snake_case : Any = blocks
_snake_case : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self: Optional[Any], a_: Optional[Any] ):
'''simple docstring'''
_snake_case : int = self.conv_in(a_ )
_snake_case : Optional[int] = nn.silu(a_ )
for block in self.blocks:
_snake_case : Tuple = block(a_ )
_snake_case : int = nn.silu(a_ )
_snake_case : Optional[int] = self.conv_out(a_ )
return embedding
@flax_register_to_config
class lowercase( nn.Module , __a , __a ):
'''simple docstring'''
lowercase__ = 32
lowercase__ = 4
lowercase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowercase__ = False
lowercase__ = (3_20, 6_40, 12_80, 12_80)
lowercase__ = 2
lowercase__ = 8
lowercase__ = None
lowercase__ = 12_80
lowercase__ = 0.0
lowercase__ = False
lowercase__ = jnp.floataa
lowercase__ = True
lowercase__ = 0
lowercase__ = "rgb"
lowercase__ = (16, 32, 96, 2_56)
def UpperCamelCase_ ( self: int, a_: jax.random.KeyArray ):
'''simple docstring'''
_snake_case : str = (1, self.in_channels, self.sample_size, self.sample_size)
_snake_case : Optional[Any] = jnp.zeros(a_, dtype=jnp.floataa )
_snake_case : List[str] = jnp.ones((1,), dtype=jnp.intaa )
_snake_case : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa )
_snake_case : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
_snake_case : Optional[int] = jnp.zeros(a_, dtype=jnp.floataa )
_snake_case , _snake_case : Tuple = jax.random.split(a_ )
_snake_case : str = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(a_, a_, a_, a_, a_ )["params"]
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.block_out_channels
_snake_case : Optional[int] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_snake_case : int = self.num_attention_heads or self.attention_head_dim
# input
_snake_case : Union[str, Any] = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
_snake_case : int = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
_snake_case : Any = FlaxTimestepEmbedding(a_, dtype=self.dtype )
_snake_case : Optional[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
_snake_case : List[str] = self.only_cross_attention
if isinstance(a_, a_ ):
_snake_case : List[Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(a_, a_ ):
_snake_case : Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
# down
_snake_case : List[str] = []
_snake_case : Tuple = []
_snake_case : int = block_out_channels[0]
_snake_case : Optional[Any] = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(a_ )
for i, down_block_type in enumerate(self.down_block_types ):
_snake_case : List[Any] = output_channel
_snake_case : Any = block_out_channels[i]
_snake_case : List[str] = i == len(a_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_snake_case : Optional[int] = FlaxCrossAttnDownBlockaD(
in_channels=a_, out_channels=a_, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
_snake_case : List[Any] = FlaxDownBlockaD(
in_channels=a_, out_channels=a_, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(a_ )
for _ in range(self.layers_per_block ):
_snake_case : List[Any] = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(a_ )
if not is_final_block:
_snake_case : List[Any] = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(a_ )
_snake_case : str = down_blocks
_snake_case : Union[str, Any] = controlnet_down_blocks
# mid
_snake_case : Tuple = block_out_channels[-1]
_snake_case : Optional[int] = FlaxUNetMidBlockaDCrossAttn(
in_channels=a_, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
_snake_case : Tuple = nn.Conv(
a_, kernel_size=(1, 1), padding="""VALID""", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self: str, a_: Any, a_: Tuple, a_: Any, a_: int, a_: float = 1.0, a_: bool = True, a_: bool = False, ):
'''simple docstring'''
_snake_case : Dict = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_snake_case : List[Any] = jnp.flip(a_, axis=1 )
# 1. time
if not isinstance(a_, jnp.ndarray ):
_snake_case : Any = jnp.array([timesteps], dtype=jnp.intaa )
elif isinstance(a_, jnp.ndarray ) and len(timesteps.shape ) == 0:
_snake_case : Union[str, Any] = timesteps.astype(dtype=jnp.floataa )
_snake_case : List[str] = jnp.expand_dims(a_, 0 )
_snake_case : List[str] = self.time_proj(a_ )
_snake_case : str = self.time_embedding(a_ )
# 2. pre-process
_snake_case : List[str] = jnp.transpose(a_, (0, 2, 3, 1) )
_snake_case : List[Any] = self.conv_in(a_ )
_snake_case : Union[str, Any] = jnp.transpose(a_, (0, 2, 3, 1) )
_snake_case : Any = self.controlnet_cond_embedding(a_ )
sample += controlnet_cond
# 3. down
_snake_case : List[str] = (sample,)
for down_block in self.down_blocks:
if isinstance(a_, a_ ):
_snake_case , _snake_case : Optional[Any] = down_block(a_, a_, a_, deterministic=not train )
else:
_snake_case , _snake_case : Dict = down_block(a_, a_, deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_snake_case : Dict = self.mid_block(a_, a_, a_, deterministic=not train )
# 5. contronet blocks
_snake_case : Tuple = ()
for down_block_res_sample, controlnet_block in zip(a_, self.controlnet_down_blocks ):
_snake_case : Any = controlnet_block(a_ )
controlnet_down_block_res_samples += (down_block_res_sample,)
_snake_case : List[Any] = controlnet_down_block_res_samples
_snake_case : int = self.controlnet_mid_block(a_ )
# 6. scaling
_snake_case : int = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=a_, mid_block_res_sample=a_ )
| 132 | 0 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)
print(f"""Loading model based on config from {config_path}...""" )
lowerCAmelCase__ : Dict = BertConfig.from_json_file(__UpperCAmelCase )
lowerCAmelCase__ : Tuple = BertForMaskedLM(__UpperCAmelCase )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
lowerCAmelCase__ : BertLayer = model.bert.encoder.layer[layer_index]
# Self-attention
lowerCAmelCase__ : BertSelfAttention = layer.attention.self
lowerCAmelCase__ : Tuple = get_encoder_attention_layer_array(
__UpperCAmelCase , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
lowerCAmelCase__ : Optional[int] = get_encoder_attention_layer_array(
__UpperCAmelCase , """_query_dense/bias""" , self_attn.query.bias.data.shape )
lowerCAmelCase__ : Dict = get_encoder_attention_layer_array(
__UpperCAmelCase , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
lowerCAmelCase__ : List[str] = get_encoder_attention_layer_array(
__UpperCAmelCase , """_key_dense/bias""" , self_attn.key.bias.data.shape )
lowerCAmelCase__ : Union[str, Any] = get_encoder_attention_layer_array(
__UpperCAmelCase , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
lowerCAmelCase__ : str = get_encoder_attention_layer_array(
__UpperCAmelCase , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
lowerCAmelCase__ : BertSelfOutput = layer.attention.output
lowerCAmelCase__ : List[str] = get_encoder_attention_layer_array(
__UpperCAmelCase , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
lowerCAmelCase__ : str = get_encoder_attention_layer_array(
__UpperCAmelCase , """_output_dense/bias""" , self_output.dense.bias.data.shape )
lowerCAmelCase__ : Optional[Any] = get_encoder_layer_array(__UpperCAmelCase , """_attention_layer_norm/gamma""" )
lowerCAmelCase__ : Any = get_encoder_layer_array(__UpperCAmelCase , """_attention_layer_norm/beta""" )
# Intermediate
lowerCAmelCase__ : BertIntermediate = layer.intermediate
lowerCAmelCase__ : Optional[int] = get_encoder_layer_array(__UpperCAmelCase , """_intermediate_dense/kernel""" )
lowerCAmelCase__ : Tuple = get_encoder_layer_array(__UpperCAmelCase , """_intermediate_dense/bias""" )
# Output
lowerCAmelCase__ : BertOutput = layer.output
lowerCAmelCase__ : Tuple = get_encoder_layer_array(__UpperCAmelCase , """_output_dense/kernel""" )
lowerCAmelCase__ : List[str] = get_encoder_layer_array(__UpperCAmelCase , """_output_dense/bias""" )
lowerCAmelCase__ : str = get_encoder_layer_array(__UpperCAmelCase , """_output_layer_norm/gamma""" )
lowerCAmelCase__ : Optional[Any] = get_encoder_layer_array(__UpperCAmelCase , """_output_layer_norm/beta""" )
# Embeddings
lowerCAmelCase__ : Any = get_encoder_array("""_position_embedding_layer/embeddings""" )
lowerCAmelCase__ : List[str] = get_encoder_array("""_type_embedding_layer/embeddings""" )
lowerCAmelCase__ : Union[str, Any] = get_encoder_array("""_embedding_norm_layer/gamma""" )
lowerCAmelCase__ : Union[str, Any] = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
lowerCAmelCase__ : int = model.cls.predictions.transform
lowerCAmelCase__ : Union[str, Any] = get_masked_lm_array("""dense/kernel""" )
lowerCAmelCase__ : Optional[Any] = get_masked_lm_array("""dense/bias""" )
lowerCAmelCase__ : Any = get_masked_lm_array("""layer_norm/gamma""" )
lowerCAmelCase__ : int = get_masked_lm_array("""layer_norm/beta""" )
lowerCAmelCase__ : str = get_masked_lm_array("""embedding_table""" )
# Pooling
lowerCAmelCase__ : Optional[int] = BertPooler(config=__UpperCAmelCase )
lowerCAmelCase__ : BertPooler = get_encoder_array("""_pooler_layer/kernel""" )
lowerCAmelCase__ : BertPooler = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(__UpperCAmelCase )
# Integration test - should load without any errors ;)
lowerCAmelCase__ : Dict = BertForMaskedLM.from_pretrained(__UpperCAmelCase )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 242 |
"""simple docstring"""
from __future__ import annotations
def lowercase_ ( __UpperCAmelCase ) -> int:
if not nums:
return 0
lowerCAmelCase__ : List[Any] = nums[0]
lowerCAmelCase__ : List[str] = 0
for num in nums[1:]:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = (
max_excluding + num,
max(__UpperCAmelCase , __UpperCAmelCase ),
)
return max(__UpperCAmelCase , __UpperCAmelCase )
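

# Worked example (illustrative): for [1, 2, 4, 5] the best non-adjacent picks
# are 2 and 5, so maximum_non_adjacent_sum([1, 2, 4, 5]) returns 7.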
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242 | 1 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ =logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
lowercase__ =[]
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def __UpperCamelCase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__a : Tuple = state_dict.pop(lowerCAmelCase__ )
__a : Optional[Any] = val
def __UpperCamelCase ( lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__a : List[str] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__a : List[Any] = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
__a : str = value
else:
__a : Dict = value
return new_state_dict
def __UpperCamelCase ( lowerCAmelCase__ : Any ):
"""simple docstring"""
__a : Dict = ''''''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__a : Any = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
__a : Optional[Any] = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__a : Tuple = in_proj_weight[:2_5_6, :]
__a : Optional[int] = in_proj_bias[:2_5_6]
__a : List[str] = in_proj_weight[2_5_6:5_1_2, :]
__a : Tuple = in_proj_bias[2_5_6:5_1_2]
__a : List[Any] = in_proj_weight[-2_5_6:, :]
__a : List[str] = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
__a : Optional[Any] = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
__a : Tuple = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
__a : Optional[int] = in_proj_weight[:2_5_6, :]
__a : Dict = in_proj_bias[:2_5_6]
__a : Any = in_proj_weight[2_5_6:5_1_2, :]
__a : int = in_proj_bias[2_5_6:5_1_2]
__a : Tuple = in_proj_weight[-2_5_6:, :]
__a : Dict = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
__a : Union[str, Any] = state_dict.pop(
f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
__a : Dict = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
__a : Optional[Any] = in_proj_weight_cross_attn[:2_5_6, :]
__a : Any = in_proj_bias_cross_attn[:2_5_6]
__a : Optional[int] = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
__a : Optional[Any] = in_proj_bias_cross_attn[2_5_6:5_1_2]
__a : Any = in_proj_weight_cross_attn[-2_5_6:, :]
__a : str = in_proj_bias_cross_attn[-2_5_6:]
def __UpperCamelCase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
__a , __a : Dict = image.size
__a : List[Any] = max(lowerCAmelCase__ , lowerCAmelCase__ )
__a : List[Any] = 8_0_0 if '''detection''' in checkpoint_url else 1_0_0_0
__a : List[Any] = target_max_size / current_max_size
__a : str = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] ):
"""simple docstring"""
__a : Any = F.to_tensor(lowerCAmelCase__ )
__a : str = F.normalize(lowerCAmelCase__ , mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] )
return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor(
            [[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]]
        )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor(
            [[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]]
        )

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
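# Example invocation (the script name and output path are illustrative, not taken from this file):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection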
| 364 |
from math import pi, sqrt
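# Gamma function for positive integers and half-integers via the recurrence
# gamma(num) = (num - 1) * gamma(num - 1), with base cases gamma(1) = 1 and
# gamma(0.5) = sqrt(pi); e.g. gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi).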
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
while num:
        num = float(input('Gamma of: '))
print(F"""gamma({num}) = {gamma(num)}""")
print('\nEnter 0 to exit...')
| 90 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError('The mass of a body cannot be negative')
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
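    # Illustrative check: a 10 kg body moving at 10 m/s carries 0.5 * 10 * 10**2 = 500.0 J.
    assert kinetic_energy(10, 10) == 500.0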
| 282 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base')
        tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')
        text = 'The dog is cute and lives in the garden house'
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])

        output = model(input_ids)['last_hidden_state']
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
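        # Only the last feature dimension is checked so the reference tensor stays small
        # (one value per token position) while still catching numerical regressions.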
| 282 | 1 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
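# Illustrative wiring (the trainer construction below is an assumption, not part of this module):
#   trainer = pl.Trainer(callbacks=[
#       get_checkpoint_callback(output_dir, "rouge2"),
#       get_early_stopping_callback("rouge2", patience=3),
#       Seq2SeqLoggingCallback(),
#   ])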
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 278 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
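# The tester below centralizes the hyperparameters shared across the feature-extraction
# tests; seq_length_diff spaces the per-sample input lengths evenly over the batch.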
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
| 278 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, sep_token_id=66,
        attention_type="block_sparse", use_bias=True, rescale_embeddings=False,
        block_size=64, num_random_blocks=3, classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            sep_token_id=sep_token_id, **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout
class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
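# Minimal usage sketch (a hypothetical example, not part of this module):
#   config = BigBirdConfig(attention_type="block_sparse", block_size=64)
#   onnx_config = BigBirdOnnxConfig(config)
#   onnx_config.inputs  # OrderedDict with dynamic batch/sequence axes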
| 140 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/time-series-transformer-tourism-monthly""": (
"""https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"""
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
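# Quick sanity check (illustrative): context_length falls back to prediction_length.
#   config = TimeSeriesTransformerConfig(prediction_length=24)
#   assert config.context_length == 24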
| 140 | 1 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
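# Boxes are mapped onto a fixed 0-1000 grid regardless of the source resolution,
# which is the coordinate convention LayoutLM-style models are pretrained with.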
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 354 |
"""simple docstring"""
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(
            max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices)
        )

    return max_revue
def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)
def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )

        max_rev[n] = max_revenue

    return max_rev[n]
def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]
def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)
def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 203 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm1.weight""", f"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm1.bias""", f"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.weight""", f"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.attn.proj.bias""", f"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.norm2.weight""", f"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.norm2.bias""", f"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.weight""", f"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc1.bias""", f"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(f"""encoder.deit.blocks.{i}.mlp.fc2.weight""", f"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""encoder.deit.blocks.{i}.mlp.fc2.bias""", f"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
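# The original DeiT encoder stores query/key/value as one fused "qkv" matrix per block;
# the helper below slices it into the separate projections used by the HF ViT encoder.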
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 242 |
"""simple docstring"""
from typing import Any
import numpy as np
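# For a Hermitian matrix A and a nonzero vector v, the Rayleigh quotient
# R(A, v) = (v* A v) / (v* v) is always real and lies between the smallest and
# largest eigenvalues of A.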
def is_hermitian(matrix: np.ndarray) -> bool:
    return np.array_equal(matrix, matrix.conjugate().T)
def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 242 | 1 |
from __future__ import annotations
arr: list[float] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect: list[float] = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result
def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
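# The stack-based variant is O(n): every element is pushed exactly once and popped at
# most once, whereas the two nested-loop variants above are O(n^2) in the worst case.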
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
| 304 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 304 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
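# Note: only decoder layer 0's ca_qpos_proj is renamed in the block above; the
# commented-out per-layer entries are skipped on purpose because conditional DETR
# applies the query position projection in the first decoder layer only.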
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)

    return im
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str ) -> List[str]:
"""simple docstring"""
__lowerCamelCase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__lowerCamelCase = 'resnet101'
if "dc5" in model_name:
__lowerCamelCase = True
__lowerCamelCase = 'panoptic' in model_name
if is_panoptic:
__lowerCamelCase = 250
else:
__lowerCamelCase = 91
__lowerCamelCase = 'huggingface/label-files'
__lowerCamelCase = 'coco-detection-id2label.json'
__lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
__lowerCamelCase = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCamelCase = idalabel
__lowerCamelCase = {v: k for k, v in idalabel.items()}
# load image processor
__lowerCamelCase = 'coco_panoptic' if is_panoptic else 'coco_detection'
__lowerCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase__ )
# prepare image
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=UpperCamelCase__ , return_tensors='pt' )
__lowerCamelCase = encoding['pixel_values']
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
__lowerCamelCase = torch.hub.load('DeppMeng/ConditionalDETR' , UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval()
__lowerCamelCase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__lowerCamelCase = 'conditional_detr.' + src
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = rename_backbone_keys(UpperCamelCase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase__ , is_panoptic=UpperCamelCase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowerCamelCase = 'conditional_detr.model.' if is_panoptic else 'model.'
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('conditional_detr' )
and not key.startswith('class_labels_classifier' )
and not key.startswith('bbox_predictor' )
):
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
continue
else:
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
else:
if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
__lowerCamelCase = state_dict.pop(UpperCamelCase__ )
__lowerCamelCase = val
# finally, create HuggingFace model and load state dict
__lowerCamelCase = ConditionalDetrForSegmentation(UpperCamelCase__ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
    model.push_to_hub(repo_id=model_name , organization='DepuMeng' , commit_message='Add model' )
# verify our conversion
__lowerCamelCase = conditional_detr(UpperCamelCase__ )
__lowerCamelCase = model(UpperCamelCase__ )
assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__A = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 90 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler('sample_euler' )
UpperCamelCase = 'A painting of a squirrel eating a burger'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=A_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler('sample_euler' )
UpperCamelCase = 'A painting of a squirrel eating a burger'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sd_pipe([prompt] , generator=A_ , guidance_scale=9.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
sd_pipe.set_scheduler('sample_dpmpp_2m' )
UpperCamelCase = 'A painting of a squirrel eating a burger'
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = sd_pipe(
[prompt] , generator=A_ , guidance_scale=7.5 , num_inference_steps=15 , output_type='np' , use_karras_sigmas=A_ , )
UpperCamelCase = output.images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase = np.array(
[0.1138_1689, 0.1211_2921, 0.138_9457, 0.1254_9606, 0.124_4964, 0.1083_1517, 0.1156_2866, 0.1086_7816, 0.1049_9048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 222 | 0 |
import sys
def matrix_chain_order(array : list ) -> tuple:
    """simple docstring"""
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution : list , i : int , j : int ) -> None:
    """simple docstring"""
    if i == j:
        print('''A''' + str(i ) , end=''' ''' )
    else:
        print('''(''' , end=''' ''' )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(''')''' , end=''' ''' )
def main() -> None:
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , sol = matrix_chain_order(array )
    print('''No. of Operations required: ''' + str(matrix[1][n - 1] ) )
    print_optimal_solution(sol , 1 , n - 1 )
if __name__ == "__main__":
main()
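# Editor's note: for the dimension array [30, 35, 15, 5, 10, 20, 25] used in
# main() -- the classic CLRS example -- the minimum number of scalar
# multiplications is 15125 and the optimal parenthesization is
# ((A1(A2A3))((A4A5)A6)).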
| 352 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 7 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=True )-> Union[str, Any]:
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
    UpperCamelCase, UpperCamelCase, UpperCamelCase, UpperCamelCase = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
UpperCamelCase = cached_file(__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models )
UpperCamelCase = config_class.from_json_file(__UpperCamelCase )
UpperCamelCase = True
UpperCamelCase = True
print(F"Building TensorFlow model from configuration: {config}" )
UpperCamelCase = model_class(__UpperCamelCase )
    # Resolve the PyTorch checkpoint (may be a shortcut name in the AWS map)
if pytorch_checkpoint_path in aws_config_map.keys():
UpperCamelCase = cached_file(
__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
UpperCamelCase = load_pytorch_checkpoint_in_tfa_model(__UpperCamelCase , __UpperCamelCase )
if compare_with_pt_model:
UpperCamelCase = tf_model(tf_model.dummy_inputs , training=__UpperCamelCase ) # build the network
UpperCamelCase = torch.load(__UpperCamelCase , map_location="""cpu""" )
UpperCamelCase = pt_model_class.from_pretrained(
pretrained_model_name_or_path=__UpperCamelCase , config=__UpperCamelCase , state_dict=__UpperCamelCase )
with torch.no_grad():
UpperCamelCase = pt_model(**pt_model.dummy_inputs )
UpperCamelCase = pto[0].numpy()
UpperCamelCase = tfo[0].numpy()
UpperCamelCase = np.amax(np.abs(np_pt - np_tf ) )
print(F"Max absolute difference between models outputs {diff}" )
assert diff <= 2E-2, F"Error, model absolute difference is >2e-2: {diff}"
    # Save TensorFlow model
print(F"Save TensorFlow model to {tf_dump_path}" )
tf_model.save_weights(__UpperCamelCase , save_format="""h5""" )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , __UpperCamelCase=False , )-> Dict:
if args_model_type is None:
UpperCamelCase = list(MODEL_CLASSES.keys() )
else:
UpperCamelCase = [args_model_type]
for j, model_type in enumerate(__UpperCamelCase , start=1 ):
print("""=""" * 100 )
print(F" Converting model type {j}/{len(__UpperCamelCase )}: {model_type}" )
print("""=""" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
        UpperCamelCase, UpperCamelCase, UpperCamelCase, UpperCamelCase, UpperCamelCase = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
UpperCamelCase = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
UpperCamelCase = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(__UpperCamelCase , __UpperCamelCase ) , start=1 ):
print("""-""" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F" Skipping finetuned checkpoint {model_shortcut_name}" )
continue
UpperCamelCase = model_shortcut_name
elif only_convert_finetuned_models:
print(F" Skipping not finetuned checkpoint {model_shortcut_name}" )
continue
print(
F" Converting checkpoint {i}/{len(__UpperCamelCase )}: {model_shortcut_name} - model_type {model_type}" )
print("""-""" * 100 )
if config_shortcut_name in aws_config_map:
UpperCamelCase = cached_file(__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models )
else:
UpperCamelCase = config_shortcut_name
if model_shortcut_name in aws_model_maps:
UpperCamelCase = cached_file(__UpperCamelCase , __UpperCamelCase , force_download=not use_cached_models )
else:
UpperCamelCase = model_shortcut_name
if os.path.isfile(__UpperCamelCase ):
UpperCamelCase = """converted_model"""
convert_pt_checkpoint_to_tf(
model_type=__UpperCamelCase , pytorch_checkpoint_path=__UpperCamelCase , config_file=__UpperCamelCase , tf_dump_path=os.path.join(__UpperCamelCase , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=__UpperCamelCase , )
if remove_cached_files:
os.remove(__UpperCamelCase )
os.remove(__UpperCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 321 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ = 2_5_6
class a_ ( DiffusionPipeline ):
lowercase = ["""melgan"""]
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> None:
"""simple docstring"""
super().__init__()
# From MELGAN
UpperCamelCase = math.log(1e-5 ) # Matches MelGAN training.
UpperCamelCase = 4.0 # Largest value for most examples
UpperCamelCase = 128
self.register_modules(
notes_encoder=_SCREAMING_SNAKE_CASE , continuous_encoder=_SCREAMING_SNAKE_CASE , decoder=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , melgan=_SCREAMING_SNAKE_CASE , )
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Any:
"""simple docstring"""
        UpperCamelCase, UpperCamelCase = output_range
if clip:
UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , self.min_value , self.max_value )
# Scale to [0, 1].
UpperCamelCase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
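    # Editor's note (worked example, values set in __init__ above): with
    # min_value = math.log(1e-5) ~ -11.5129 and max_value = 4.0 the map is
    # linear, so min_value -> min_out, max_value -> max_out, and the midpoint
    # (min_value + max_value) / 2 maps to (min_out + max_out) / 2.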
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=(-1.0, 1.0) , _SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
"""simple docstring"""
        UpperCamelCase, UpperCamelCase = input_range
UpperCamelCase = torch.clip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if clip else outputs
# Scale to [0, 1].
UpperCamelCase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = input_tokens > 0
        UpperCamelCase, UpperCamelCase = self.notes_encoder(
encoder_input_tokens=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE )
        UpperCamelCase, UpperCamelCase = self.continuous_encoder(
encoder_inputs=_SCREAMING_SNAKE_CASE , encoder_inputs_mask=_SCREAMING_SNAKE_CASE )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
UpperCamelCase = noise_time
if not torch.is_tensor(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_SCREAMING_SNAKE_CASE ) and len(timesteps.shape ) == 0:
UpperCamelCase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCamelCase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
UpperCamelCase = self.decoder(
encodings_and_masks=_SCREAMING_SNAKE_CASE , decoder_input_tokens=_SCREAMING_SNAKE_CASE , decoder_noise_time=_SCREAMING_SNAKE_CASE )
return logits
@torch.no_grad()
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 100 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "numpy" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_SCREAMING_SNAKE_CASE )}." )
UpperCamelCase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
UpperCamelCase = np.zeros([1, 0, self.n_dims] , np.floataa )
UpperCamelCase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device )
for i, encoder_input_tokens in enumerate(_SCREAMING_SNAKE_CASE ):
if i == 0:
UpperCamelCase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
UpperCamelCase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_SCREAMING_SNAKE_CASE , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
UpperCamelCase = ones
UpperCamelCase = self.scale_features(
_SCREAMING_SNAKE_CASE , output_range=[-1.0, 1.0] , clip=_SCREAMING_SNAKE_CASE )
UpperCamelCase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_SCREAMING_SNAKE_CASE , continuous_mask=_SCREAMING_SNAKE_CASE , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
UpperCamelCase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_SCREAMING_SNAKE_CASE , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
UpperCamelCase = self.decode(
encodings_and_masks=_SCREAMING_SNAKE_CASE , input_tokens=_SCREAMING_SNAKE_CASE , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
UpperCamelCase = self.scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE ).prev_sample
UpperCamelCase = self.scale_to_features(_SCREAMING_SNAKE_CASE , input_range=[-1.0, 1.0] )
UpperCamelCase = mel[:1]
UpperCamelCase = mel.cpu().float().numpy()
UpperCamelCase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
logger.info("""Generated segment""" , _SCREAMING_SNAKE_CASE )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
UpperCamelCase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
UpperCamelCase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_SCREAMING_SNAKE_CASE )
| 321 | 1 |
def equation ( x: float ):
    '''simple docstring'''
    return 10 - x * x
def bisection ( a: float , b: float ):
    '''simple docstring'''
    if equation(a ) * equation(b ) >= 0:
        raise ValueError("Wrong interval: equation(a) and equation(b) must have opposite signs" )
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c ) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c ) * equation(a ) < 0:
            b = c
        else:
            a = c
    return c
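# Editor's note (illustrative, assuming the standard bisection above): the
# positive root of 10 - x*x is sqrt(10) ~ 3.1623, so with the 0.01 interval
# tolerance both calls below should print a value close to 3.16.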
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 362 |
import sys
def matrix_chain_order ( array: list ):
    '''simple docstring'''
    n = len(array )
    matrix = [[0 for x in range(n )] for x in range(n )]
    sol = [[0 for x in range(n )] for x in range(n )]
    for chain_length in range(2 , n ):
        for a in range(1 , n - chain_length + 1 ):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a , b ):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
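# Editor's note: a small worked instance of the recurrence above. For
# dimensions [10, 30, 5, 60] (A1: 10x30, A2: 30x5, A3: 5x60):
#   cost((A1 A2) A3) = 10*30*5 + 10*5*60 = 1500 + 3000 = 4500
#   cost(A1 (A2 A3)) = 30*5*60 + 10*30*60 = 9000 + 18000 = 27000
# so the DP selects ((A1 A2) A3) with 4500 scalar multiplications.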
def print_optimal_solution ( optimal_solution: list , i: int , j: int ):
    '''simple docstring'''
    if i == j:
        print("A" + str(i ) , end=" " )
    else:
        print("(" , end=" " )
        print_optimal_solution(optimal_solution , i , optimal_solution[i][j] )
        print_optimal_solution(optimal_solution , optimal_solution[i][j] + 1 , j )
        print(")" , end=" " )
def main ( ):
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array )
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix , sol = matrix_chain_order(array )
    print("No. of Operations required: " + str(matrix[1][n - 1] ) )
    print_optimal_solution(sol , 1 , n - 1 )
if __name__ == "__main__":
main()
| 297 | 0 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class _A ( IterableDataset):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=1 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer
SCREAMING_SNAKE_CASE_ : List[str] = dataset
SCREAMING_SNAKE_CASE_ : List[Any] = len(_SCREAMING_SNAKE_CASE ) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies
def __iter__( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
SCREAMING_SNAKE_CASE_ : Tuple = self.tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class _A ( StoppingCriteria):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = start_length
SCREAMING_SNAKE_CASE_ : Any = eof_strings
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(_SCREAMING_SNAKE_CASE )
def A_ ( a ):
    """simple docstring"""
    string_list = re.split('(%s)' % '|'.join(EOF_STRINGS ) , a )
    # the last two elements are the stop token and whatever follows it; drop both
    return "".join(string_list[:-2] )
def A_ ( a , a , a , a , a , a=2_0 , **a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = defaultdict(a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(a ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = batch['ids'].shape[-1]
SCREAMING_SNAKE_CASE_ : str = accelerator.unwrap_model(a ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=a , **a )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ : Union[str, Any] = batch['task_id'].repeat(a )
SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.pad_across_processes(
a , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ : List[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(a , a ):
gen_token_dict[task].append(a )
SCREAMING_SNAKE_CASE_ : str = [[] for _ in range(a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(a , skip_special_tokens=a , clean_up_tokenization_spaces=a )
code_gens[task].append(remove_last_block(a ) )
return code_gens
def A_ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = HfArgumentParser(a )
SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ : Optional[int] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ : List[str] = 'false'
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ : Dict = Accelerator()
set_seed(args.seed , device_specific=a )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ : Any = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
        'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , EOF_STRINGS , tokenizer )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ : List[str] = load_dataset('openai_humaneval' )
SCREAMING_SNAKE_CASE_ : str = load_metric('code_eval' )
SCREAMING_SNAKE_CASE_ : Tuple = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
SCREAMING_SNAKE_CASE_ : Any = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ : int = TokenizedDataset(a , human_eval['test'] , n_copies=a , n_tasks=a )
    # note: args.batch_size is the number of sequences generated per prompt (num_return_sequences); the DataLoader batch size is 1
SCREAMING_SNAKE_CASE_ : Tuple = DataLoader(a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.prepare(a , a )
SCREAMING_SNAKE_CASE_ : List[str] = complete_code(
a , a , a , a , n_tasks=a , batch_size=args.batch_size , **a , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ : str = []
for task in tqdm(range(a ) ):
SCREAMING_SNAKE_CASE_ : str = human_eval['test'][task]['test']
SCREAMING_SNAKE_CASE_ : int = f"check({human_eval['test'][task]['entry_point']})"
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(
references=a , predictions=a , num_workers=args.num_workers )
print(f"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(a , a )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 253 |
__version__ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 253 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class __snake_case ( PretrainedConfig ):
__lowerCamelCase : int = """openai-gpt"""
__lowerCamelCase : str = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , snake_case__=4_0478 , snake_case__=512 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1e-5 , snake_case__=0.02 , snake_case__="cls_index" , snake_case__=True , snake_case__=None , snake_case__=True , snake_case__=0.1 , **snake_case__ , ) -> Any:
'''simple docstring'''
UpperCAmelCase : Any =vocab_size
UpperCAmelCase : Optional[int] =n_positions
UpperCAmelCase : Any =n_embd
UpperCAmelCase : Tuple =n_layer
UpperCAmelCase : List[Any] =n_head
UpperCAmelCase : Dict =afn
UpperCAmelCase : Dict =resid_pdrop
UpperCAmelCase : str =embd_pdrop
UpperCAmelCase : str =attn_pdrop
UpperCAmelCase : int =layer_norm_epsilon
UpperCAmelCase : Optional[Any] =initializer_range
UpperCAmelCase : Optional[int] =summary_type
UpperCAmelCase : List[str] =summary_use_proj
UpperCAmelCase : Optional[Any] =summary_activation
UpperCAmelCase : Any =summary_first_dropout
UpperCAmelCase : Optional[int] =summary_proj_to_labels
super().__init__(**snake_case__ )
| 366 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case = {
'''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''LlamaTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = ['''LlamaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''LlamaForCausalLM''',
'''LlamaModel''',
'''LlamaPreTrainedModel''',
'''LlamaForSequenceClassification''',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 78 | 0 |
a__ = """Tobias Carryer"""
from time import time
class LinearCongruentialGenerator :
    '''simple docstring'''
    def __init__( self : Tuple , multiplier : int , increment : int , modulo : int , seed : int=int(time())) -> None: # noqa: B008
        """simple docstring"""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed
    def next_number( self : Tuple) -> int:
        """simple docstring"""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
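    # Editor's note: these constants -- multiplier 1664525, increment
    # 1013904223, modulo 2 << 31 == 2**32 -- are the LCG parameters given in
    # Numerical Recipes, so the stream below is deterministic for a fixed seed.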
while True:
print(lcg.next_number())
| 317 |
from ...processing_utils import ProcessorMixin
class snake_case ( ProcessorMixin ):
'''simple docstring'''
snake_case_ : int = ["""image_processor""", """feature_extractor"""]
snake_case_ : List[Any] = """TvltImageProcessor"""
snake_case_ : Dict = """TvltFeatureExtractor"""
def __init__( self : Any , lowerCAmelCase : Optional[int] , lowerCAmelCase : str) -> Optional[int]:
"""simple docstring"""
super().__init__(image_processor=lowerCAmelCase , feature_extractor=lowerCAmelCase)
_snake_case : List[Any] = image_processor
_snake_case : List[Any] = feature_extractor
def __call__( self : Union[str, Any] , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Dict=False , *lowerCAmelCase : Union[str, Any] , **lowerCAmelCase : Any , ) -> Any:
"""simple docstring"""
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""")
_snake_case : Union[str, Any] = None
if images is not None:
_snake_case : Any = self.image_processor(lowerCAmelCase , mask_pixel=lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase)
if images_mixed is not None:
_snake_case : Union[str, Any] = self.image_processor(lowerCAmelCase , is_mixed=lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase)
if audio is not None:
_snake_case : int = self.feature_extractor(
lowerCAmelCase , *lowerCAmelCase , sampling_rate=lowerCAmelCase , mask_audio=lowerCAmelCase , **lowerCAmelCase)
_snake_case : Any = {}
if audio is not None:
output_dict.update(lowerCAmelCase)
if images is not None:
output_dict.update(lowerCAmelCase)
if images_mixed_dict is not None:
output_dict.update(lowerCAmelCase)
return output_dict
@property
def UpperCamelCase_ ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_snake_case : Optional[Any] = self.image_processor.model_input_names
_snake_case : List[str] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 317 | 1 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = LongformerTokenizer
__magic_name__ = True
__magic_name__ = LongformerTokenizerFast
__magic_name__ = True
def a ( self ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_lowerCAmelCase : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_lowerCAmelCase : List[Any] = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
_lowerCAmelCase : List[str] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_lowerCAmelCase : List[str] = {"""unk_token""": """<unk>"""}
_lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCamelCase_ ) )
def a ( self , **snake_case__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def a ( self , **snake_case__ ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def a ( self , snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : str = """lower newer"""
_lowerCAmelCase : Optional[Any] = """lower newer"""
return input_text, output_text
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
_lowerCAmelCase : List[str] = tokenizer.tokenize(lowerCamelCase_ ) # , add_prefix_space=True)
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
_lowerCAmelCase : Dict = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowerCamelCase_ ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowerCamelCase_ ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' )
_lowerCAmelCase : Dict = tokenizer.encode('sequence builders' , add_special_tokens=lowerCamelCase_ )
_lowerCAmelCase : str = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCamelCase_ )
_lowerCAmelCase : Optional[Any] = tokenizer.encode(
'sequence builders' , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
_lowerCAmelCase : List[str] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
_lowerCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ )
_lowerCAmelCase : str = tokenizer.build_inputs_with_special_tokens(lowerCamelCase_ , lowerCamelCase_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[str] = """Encode this sequence."""
_lowerCAmelCase : Optional[int] = tokenizer.byte_encoder[""" """.encode('utf-8' )[0]]
# Testing encoder arguments
_lowerCAmelCase : Dict = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
_lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase : Any = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ )
_lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
_lowerCAmelCase : str = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
# Testing spaces after special tokens
_lowerCAmelCase : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{'mask_token': AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ )} ) # mask token has a left space
_lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
_lowerCAmelCase : int = """Encode <mask> sequence"""
_lowerCAmelCase : Optional[int] = """Encode <mask>sequence"""
_lowerCAmelCase : Dict = tokenizer.encode(lowerCamelCase_ )
_lowerCAmelCase : Optional[Any] = encoded.index(lowerCamelCase_ )
_lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(lowerCamelCase_ )
_lowerCAmelCase : Union[str, Any] = encoded.index(lowerCamelCase_ )
_lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCamelCase_ , lowerCamelCase_ )
def a ( self ):
'''simple docstring'''
pass
def a ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
_lowerCAmelCase : str = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
_lowerCAmelCase : List[str] = """A, <mask> AllenNLP sentence."""
_lowerCAmelCase : List[str] = tokenizer_r.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ )
_lowerCAmelCase : List[Any] = tokenizer_p.encode_plus(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , return_token_type_ids=lowerCamelCase_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
_lowerCAmelCase : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
_lowerCAmelCase : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCamelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCamelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def a ( self ):
'''simple docstring'''
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
_lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
_lowerCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_lowerCAmelCase : Tuple = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowerCamelCase_ )
self.assertEqual(post_processor_state['add_prefix_space'] , lowerCamelCase_ )
self.assertEqual(post_processor_state['trim_offsets'] , lowerCamelCase_ )
def a ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCAmelCase : int = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_lowerCAmelCase : List[str] = F'{text_of_1_token} {text_of_1_token}'
_lowerCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase_ , use_fast=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , trim_offsets=lowerCamelCase_ )
_lowerCAmelCase : Optional[int] = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
        self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
        self.assertEqual(
            encoding.offset_mapping[1],
            (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
        )

        tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
        )
        encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
        self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
        self.assertEqual(
            encoding.offset_mapping[1],
            (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
        )

        tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
        )
        encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
        self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
        self.assertEqual(
            encoding.offset_mapping[1],
            (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
        )

        tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
        )
        encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
        self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
        self.assertEqual(
            encoding.offset_mapping[1],
            (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
        )

        text = f" {text}"

        # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
        #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
        # )
        # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
        # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
        # self.assertEqual(
        #     encoding.offset_mapping[1],
        #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
        # )

        tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
        )
        encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
        self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
        self.assertEqual(
            encoding.offset_mapping[1],
            (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
        )

        tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
        )
        encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
        self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
        self.assertEqual(
            encoding.offset_mapping[1],
            (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
        )

        tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
        )
        encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
        self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
        self.assertEqual(
            encoding.offset_mapping[1],
            (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
        )
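        # Worked example (added for clarity, not part of the original assertions):
        # with text_of_1_token = "x", the unprefixed text is "x x", so
        #   trim_offsets=True  gives offsets [(0, 1), (2, 3)]  (leading space trimmed)
        #   trim_offsets=False gives offsets [(0, 1), (1, 3)]  (space kept in the span)
        # and for the prefixed text " x x":
        #   add_prefix_space=False, trim_offsets=True  -> [(1, 2), (3, 4)]
        #   any add_prefix_space,   trim_offsets=False -> [(0, 2), (2, 4)]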
| 364 |
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, by sieving the primes
    first and then applying phi(n) = n * prod(1 - 1/p) over the prime factors."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"{solution() = }")
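
# Cross-check sketch (added for illustration; `_phi_bruteforce` is an assumed
# helper name, not part of the original solution): recompute the totient sum by
# brute force over gcds for a small limit and compare the two results.
def _phi_bruteforce(limit: int) -> int:
    from math import gcd

    return sum(sum(1 for k in range(1, n) if gcd(n, k) == 1) for n in range(2, limit + 1))


if __name__ == "__main__":
    print(f"sieve: {solution(100)}, brute force: {_phi_bruteforce(100)}")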
| 25 | 0 |
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of ``input_num``.

    >>> sum_of_divisors(28)
    28
    >>> sum_of_divisors(12)
    16
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
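
# Sketch of an O(sqrt(n)) alternative (added for illustration; the name
# `sum_of_divisors_fast` is an assumption): pair each divisor d <= sqrt(n)
# with its cofactor n // d, then drop n itself to keep only proper divisors.
def sum_of_divisors_fast(input_num: int) -> int:
    if input_num <= 0:
        raise ValueError("Input must be positive")
    total = 0
    d = 1
    while d * d <= input_num:
        if input_num % d == 0:
            total += d
            if d != input_num // d:
                total += input_num // d
        d += 1
    return total - input_num


assert sum_of_divisors_fast(28) == 28 and sum_of_divisors_fast(12) == 16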
| 162 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
__A : List[str] = '''examples/'''
__A : int = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__A : Dict = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__A : Optional[int] = '''README.md'''
def lowercase ( __snake_case : int , __snake_case : Any , __snake_case : int ):
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase_ : int = f.read()
lowercase_ , lowercase_ : List[str] = REPLACE_PATTERNS[pattern]
lowercase_ : Union[str, Any] = replace.replace('''VERSION''' , __snake_case )
lowercase_ : Optional[Any] = re_pattern.sub(__snake_case , __snake_case )
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(__snake_case )
def lowercase ( __snake_case : int ):
for folder, directories, fnames in os.walk(__snake_case ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(__snake_case , __snake_case ) , __snake_case , pattern='''examples''' )
def lowercase ( __snake_case : Optional[Any] , __snake_case : Optional[Any]=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__snake_case , __snake_case , __snake_case )
if not patch:
update_version_in_examples(__snake_case )
def lowercase ( ):
lowercase_ : Union[str, Any] = '''🤗 Transformers currently provides the following architectures'''
lowercase_ : Union[str, Any] = '''1. Want to contribute a new model?'''
with open(__snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowercase_ : List[str] = f.readlines()
# Find the start of the list.
lowercase_ : Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowercase_ : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
lowercase_ : str = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(__snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(__snake_case )
def lowercase ( ):
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
lowercase_ : List[Any] = f.read()
lowercase_ : List[str] = REPLACE_PATTERNS['''init'''][0].search(__snake_case ).groups()[0]
return packaging.version.parse(__snake_case )
def lowercase ( __snake_case : Optional[Any]=False ):
lowercase_ : str = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
lowercase_ : Optional[Any] = default_version.base_version
elif patch:
lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
lowercase_ : Optional[int] = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
lowercase_ : int = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__snake_case ) == 0:
lowercase_ : Dict = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case , patch=__snake_case )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowercase ( ):
lowercase_ : List[Any] = get_version()
lowercase_ : List[str] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
lowercase_ : Any = current_version.base_version
# Check with the user we got that right.
lowercase_ : Tuple = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__snake_case ) == 0:
lowercase_ : str = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__snake_case )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__A : int = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__A : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
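
def _demo_replace_pattern() -> str:
    """Hedged mini-demo (added for illustration; not used by the CLI above):
    apply the 'init' pattern to an in-memory snippet instead of a real file."""
    pattern, replacement = REPLACE_PATTERNS["init"]
    replacement = replacement.replace("VERSION", "4.30.0")
    # For this input the result is: __version__ = "4.30.0"
    return pattern.sub(replacement, '__version__ = "4.30.0.dev0"\n')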
| 33 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    """MBART-50 tokenizer based on SentencePiece, with fairseq-style language-code offsets."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
@property
def snake_case_ ( self):
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def snake_case_ ( self):
return self._src_lang
@src_lang.setter
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self):
__SCREAMING_SNAKE_CASE = self.__dict__.copy()
__SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def snake_case_ ( self , lowerCAmelCase__):
return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(lowerCAmelCase__)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self , lowerCAmelCase__):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = """"""
__SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCAmelCase__) + token
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(lowerCAmelCase__)
return out_string.strip()
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__SCREAMING_SNAKE_CASE = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , lowerCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase__ , """wb""") as fi:
__SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__)
return (out_vocab_file,)
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = [1] * len(self.prefix_tokens)
__SCREAMING_SNAKE_CASE = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__)) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__)) + ([0] * len(lowerCAmelCase__)) + suffix_ones
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""")
__SCREAMING_SNAKE_CASE = src_lang
__SCREAMING_SNAKE_CASE = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.convert_tokens_to_ids(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tgt_lang_id
return inputs
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "en_XX" , lowerCAmelCase__ = None , lowerCAmelCase__ = "ro_RO" , **lowerCAmelCase__ , ):
__SCREAMING_SNAKE_CASE = src_lang
__SCREAMING_SNAKE_CASE = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self):
return self.set_src_lang_special_tokens(self.src_lang)
def snake_case_ ( self):
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.lang_code_to_id[src_lang]
__SCREAMING_SNAKE_CASE = [self.cur_lang_code_id]
__SCREAMING_SNAKE_CASE = [self.eos_token_id]
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.lang_code_to_id[tgt_lang]
__SCREAMING_SNAKE_CASE = [self.cur_lang_code_id]
__SCREAMING_SNAKE_CASE = [self.eos_token_id]
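
def _demo_fairseq_offset() -> None:
    """Illustration sketch (added; not used by the tokenizer): the fairseq-offset
    arithmetic above in miniature, with a pretend 5-piece spm vocab."""
    fairseq_offset = 1
    sp_model_size = 5  # stand-in for len(self.sp_model)
    lang_codes = ["en_XX", "ro_RO"]
    lang_code_to_id = {code: sp_model_size + i + fairseq_offset for i, code in enumerate(lang_codes)}
    assert lang_code_to_id == {"en_XX": 6, "ro_RO": 7}
    # the mask token lands right after the language codes
    assert sp_model_size + len(lang_code_to_id) + fairseq_offset == 8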
| 255 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
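
# Background sketch (added for illustration, independent of _LazyModule): a
# similar lazy-import effect can be approximated with PEP 562's module-level
# __getattr__, e.g.:
#
#     import importlib
#
#     def __getattr__(name):
#         if name in _import_structure.get("modeling_focalnet", []):
#             return getattr(importlib.import_module(".modeling_focalnet", __name__), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")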
| 255 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
__a : str = StableDiffusionSAGPipeline
__a : List[Any] = TEXT_TO_IMAGE_PARAMS
__a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
__a : Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__a : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
__a : int = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
def A ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
def A ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
UpperCAmelCase = sag_pipe.to(lowercase )
sag_pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase = '''.'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sag_pipe(
[prompt] , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase = sag_pipe.to(lowercase )
sag_pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase = '''.'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sag_pipe(
[prompt] , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase = sag_pipe.to(lowercase )
sag_pipe.set_progress_bar_config(disable=lowercase )
UpperCAmelCase = '''.'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
UpperCAmelCase = output.images
assert image.shape == (1, 512, 768, 3)
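
# Usage note (added, hedged): outside the test harness, the SAG-specific knob is
# the `sag_scale` argument on the pipeline call; a minimal sketch (model id and
# values are illustrative, not prescriptive):
#   pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   image = pipe("a photo of an astronaut", sag_scale=0.75, guidance_scale=7.5).images[0]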
| 34 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
'''simple docstring'''
def __init__(self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=10 , UpperCAmelCase=3 , UpperCAmelCase=32 * 4 , UpperCAmelCase=32 * 6 , UpperCAmelCase=4 , UpperCAmelCase=32 , ) -> Optional[Any]:
_snake_case = parent
_snake_case = batch_size
_snake_case = is_training
_snake_case = use_auxiliary_loss
_snake_case = num_queries
_snake_case = num_channels
_snake_case = min_size
_snake_case = max_size
_snake_case = num_labels
_snake_case = mask_feature_size
def lowercase (self ) -> str:
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCAmelCase )
_snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase )
_snake_case = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase ) > 0.5
).float()
_snake_case = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase ) > 0.5).long()
_snake_case = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase (self ) -> Tuple:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowercase (self ) -> Optional[Any]:
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.prepare_config_and_inputs()
_snake_case = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> int:
_snake_case = output.encoder_hidden_states
_snake_case = output.pixel_decoder_hidden_states
_snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCAmelCase ) , config.decoder_config.decoder_layers )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Union[str, Any]:
with torch.no_grad():
_snake_case = MaskFormerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
_snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
_snake_case = model(UpperCAmelCase , output_hidden_states=UpperCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase , UpperCAmelCase )
def lowercase (self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
_snake_case = MaskFormerForInstanceSegmentation(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
def comm_check_on_output(UpperCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case = model(pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase )
_snake_case = model(UpperCAmelCase )
comm_check_on_output(UpperCAmelCase )
_snake_case = model(
pixel_values=UpperCAmelCase , pixel_mask=UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase )
comm_check_on_output(UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowerCAmelCase_ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowerCAmelCase_ = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowercase (self ) -> int:
_snake_case = MaskFormerModelTester(self )
_snake_case = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def lowercase (self ) -> int:
self.config_tester.run_common_tests()
def lowercase (self ) -> List[Any]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase )
def lowercase (self ) -> Any:
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def lowercase (self ) -> Optional[Any]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def lowercase (self ) -> Optional[int]:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def lowercase (self ) -> int:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def lowercase (self ) -> Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def lowercase (self ) -> Optional[Any]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowercase (self ) -> Tuple:
pass
def lowercase (self ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
@slow
def lowercase (self ) -> int:
for model_name in ["facebook/maskformer-swin-small-coco"]:
_snake_case = MaskFormerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def lowercase (self ) -> Tuple:
_snake_case = (self.model_tester.min_size,) * 2
_snake_case = {
"""pixel_values""": torch.randn((2, 3, *size) , device=UpperCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=UpperCAmelCase ),
"""class_labels""": torch.zeros(2 , 10 , device=UpperCAmelCase ).long(),
}
_snake_case = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCAmelCase )
_snake_case = model(**UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowercase (self ) -> Dict:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase , **UpperCAmelCase , output_hidden_states=UpperCAmelCase )
def lowercase (self ) -> List[str]:
_snake_case, _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(UpperCAmelCase ).to(UpperCAmelCase )
_snake_case = model(**UpperCAmelCase , output_attentions=UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase (self ) -> Tuple:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_snake_case = self.all_model_classes[1]
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
_snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase ).loss
loss.backward()
def lowercase (self ) -> List[str]:
# only MaskFormerForInstanceSegmentation has the loss
_snake_case = self.all_model_classes[1]
_snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = True
_snake_case = True
_snake_case = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
_snake_case = model(UpperCAmelCase , mask_labels=UpperCAmelCase , class_labels=UpperCAmelCase )
_snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
_snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCAmelCase = 1E-4
def __SCREAMING_SNAKE_CASE ( ):
_snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@cached_property
def lowercase (self ) -> Optional[int]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def lowercase (self ) -> str:
_snake_case = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(UpperCAmelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
_snake_case = torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
_snake_case = torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
_snake_case = torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> List[str]:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case = [
[-1.373_7124, -1.772_4937, -1.936_4233],
[-1.597_7281, -1.986_7939, -2.152_3695],
[-1.579_5398, -1.926_9832, -2.09_3942],
]
_snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> List[Any]:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
_snake_case = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_snake_case = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
_snake_case = torch.tensor(UpperCAmelCase ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=UpperCAmelCase ) )
def lowercase (self ) -> Tuple:
_snake_case = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(UpperCAmelCase )
.eval()
)
_snake_case = self.default_image_processor
_snake_case = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors="pt", )
_snake_case = inputs["""pixel_values"""].to(UpperCAmelCase )
_snake_case = [el.to(UpperCAmelCase ) for el in inputs["""mask_labels"""]]
_snake_case = [el.to(UpperCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
_snake_case = model(**UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
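
# Usage note (added, hedged): to turn raw `outputs` like the ones above into a
# per-pixel label map, MaskFormerImageProcessor exposes post-processing helpers;
# one assumed-shape sketch (check the processor docstring for exact arguments):
#   semantic_map = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[(height, width)]
#   )[0]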
| 341 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax scheduler for denoising diffusion probabilistic models (DDPM)."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
return True
@register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
'''simple docstring'''
if common is None:
__a = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
__a = jnp.array(1.0 , dtype=self.dtype )
__a = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_snake_case , init_noise_sigma=_snake_case , timesteps=_snake_case , )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
'''simple docstring'''
return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
'''simple docstring'''
__a = state.common.alphas_cumprod[t]
__a = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
__a = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
__a = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
__a = jnp.clip(_snake_case , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
__a = jnp.log(jnp.clip(_snake_case , a_min=1E-20 ) )
elif variance_type == "fixed_large":
__a = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
__a = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
__a = variance
__a = state.common.betas[t]
__a = (predicted_variance + 1) / 2
__a = frac * max_log + (1 - frac) * min_log
return variance
    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
'''simple docstring'''
__a = timestep
if key is None:
__a = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
__a , __a = jnp.split(_snake_case , sample.shape[1] , axis=1 )
else:
__a = None
# 1. compute alphas, betas
__a = state.common.alphas_cumprod[t]
__a = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
__a = 1 - alpha_prod_t
__a = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
__a = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
__a = model_output
elif self.config.prediction_type == "v_prediction":
__a = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
__a = jnp.clip(_snake_case , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
__a = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__a = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
__a = jax.random.split(_snake_case , num=1 )
__a = jax.random.normal(_snake_case , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_snake_case , _snake_case , predicted_variance=_snake_case ) ** 0.5) * noise
__a = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
__a = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_snake_case , state=_snake_case )
    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)
    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
def __len__( self ) -> Dict:
'''simple docstring'''
return self.config.num_train_timesteps
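
def _check_posterior_mean_algebra() -> None:
    """Sanity sketch (added; not used by the scheduler): for prediction_type
    'epsilon' and no clipping, combining the x0 reconstruction with the two
    coefficients in `step` equals the closed form
    (x_t - beta_t / sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_t)."""
    import numpy as np

    rng = np.random.default_rng(0)
    alpha_t, alpha_bar_prev = 0.9, 0.8
    alpha_bar_t = alpha_t * alpha_bar_prev
    beta_t = 1.0 - alpha_t
    x_t, eps = rng.normal(size=4), rng.normal(size=4)

    x0 = (x_t - np.sqrt(1 - alpha_bar_t) * eps) / np.sqrt(alpha_bar_t)
    mu = (
        np.sqrt(alpha_bar_prev) * beta_t / (1 - alpha_bar_t) * x0
        + np.sqrt(alpha_t) * (1 - alpha_bar_prev) / (1 - alpha_bar_t) * x_t
    )
    mu_closed = (x_t - beta_t / np.sqrt(1 - alpha_bar_t) * eps) / np.sqrt(alpha_t)
    assert np.allclose(mu, mu_closed)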
| 33 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 33 | 1 |
__all__ = [
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 50 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt""",
"""allenai/longformer-large-4096""": (
"""https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/longformer-base-4096""": 4096,
"""allenai/longformer-large-4096""": 4096,
"""allenai/longformer-large-4096-finetuned-triviaqa""": 4096,
"""allenai/longformer-base-4096-extra.pos.embd.only""": 4096,
"""allenai/longformer-large-4096-extra.pos.embd.only""": 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Map every byte to a printable unicode character so byte-level BPE can
    treat arbitrary byte sequences as text."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """Longformer tokenizer: a byte-level BPE identical in mechanics to RoBERTa's."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
def _SCREAMING_SNAKE_CASE ( self : int):
return len(self.encoder)
def _SCREAMING_SNAKE_CASE ( self : int):
return dict(self.encoder , **self.added_tokens_encoder)
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : List[str]):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_: Optional[int] = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = get_pairs(lowerCAmelCase__)
if not pairs:
return token
while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = bigram
SCREAMING_SNAKE_CASE_: Optional[int] = []
SCREAMING_SNAKE_CASE_: List[Any] = 0
while i < len(lowerCAmelCase__):
try:
SCREAMING_SNAKE_CASE_: List[Any] = word.index(lowerCAmelCase__ , lowerCAmelCase__)
except ValueError:
new_word.extend(word[i:])
break
else:
new_word.extend(word[i:j])
SCREAMING_SNAKE_CASE_: Tuple = j
if word[i] == first and i < len(lowerCAmelCase__) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
SCREAMING_SNAKE_CASE_: str = tuple(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = new_word
if len(lowerCAmelCase__) == 1:
break
else:
SCREAMING_SNAKE_CASE_: Dict = get_pairs(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = " ".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = word
return word
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Tuple):
SCREAMING_SNAKE_CASE_: Optional[Any] = []
for token in re.findall(self.pat , lowerCAmelCase__):
SCREAMING_SNAKE_CASE_: str = "".join(
self.byte_encoder[b] for b in token.encode("utf-8")) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__).split(" "))
return bpe_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Tuple):
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any]):
return self.decoder.get(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = "".join(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8" , errors=self.errors)
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None):
if not os.path.isdir(lowerCAmelCase__):
logger.error(F"Vocabulary path ({save_directory}) should be a directory")
return
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
SCREAMING_SNAKE_CASE_: Any = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__) + "\n")
SCREAMING_SNAKE_CASE_: List[Any] = 0
with open(lowerCAmelCase__ , "w" , encoding="utf-8") as writer:
writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
" Please check that the tokenizer is not corrupted!")
SCREAMING_SNAKE_CASE_: List[Any] = token_index
writer.write(" ".join(lowerCAmelCase__) + "\n")
index += 1
return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
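# NOTE (reference sketch): the `bpe` method above calls a `get_pairs` helper that
# falls outside this excerpt. A minimal version matching the standard byte-level
# BPE implementation is reproduced here: it returns the set of adjacent symbol
# pairs in a word, where `word` is a tuple of variable-length string symbols.
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs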
| 13 | 0 |
class Graph:
    """Directed graph stored as an adjacency dict, with recursive DFS."""

    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        """Print the raw adjacency dict, then one line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Depth-first traversal over all components (assumes vertices labeled 0..n-1)."""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex.get(start_vertex, []):
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()
    # OUTPUT:
    # {0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
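# NOTE (sketch): recursion depth scales with the number of vertices, so very deep
# graphs can exceed Python's recursion limit. This iterative variant (not part of
# the class above) walks the same adjacency dict with an explicit stack.
def dfs_iterative(graph: dict) -> list:
    """Depth-first traversal of an adjacency-list dict using an explicit stack."""
    visited, order = set(), []
    for start in graph:
        if start in visited:
            continue
        stack = [start]
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            order.append(node)
            # push neighbours in reverse so they pop in insertion order
            stack.extend(reversed(graph.get(node, [])))
    return order


# e.g. dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}) -> [0, 1, 2, 3]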
| 225 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
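# NOTE (usage sketch, separate from this module): what the lazy structure buys
# the caller, assuming the standard `_LazyModule` behaviour — submodules are only
# imported when one of their exported names is first accessed.
#
#     # Importing the package stays cheap; no torch/tf/flax import happens yet.
#     from transformers.models.clip import CLIPConfig  # loads only configuration_clip
#     config = CLIPConfig()
#
#     # Accessing a torch-backed symbol is what finally imports modeling_clip (and torch).
#     from transformers.models.clip import CLIPModel
#     model = CLIPModel(config)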
| 225 | 1 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def test_encodings_from_sample_data(self):
        """Assert that the computed tokens match the hard-coded ones."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)
    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_encodings_from_xnli_dataset(self):
        """Round-trip encode/decode on multilingual XNLI samples."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data point
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)
    def test_pretrained_model_lists(self):
        # The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
        # impose any sequence length constraints. The parent-class test would fail since it relies on
        # the maximum sequence length of the positional embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 285 |
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by `level` (positive increases contrast)."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Transformation applied to every pixel channel value."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
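# NOTE (sanity-check sketch): at level = 0 the factor is exactly 1, so the
# transform is the identity; positive levels stretch pixel values away from the
# midpoint 128, negative levels compress them toward it.
def contrast_factor(level: int) -> float:
    # Same formula as used in change_contrast above
    return (259 * (level + 255)) / (255 * (259 - level))


assert contrast_factor(0) == 1.0    # identity mapping
assert contrast_factor(170) > 1.0   # stretches contrast
assert contrast_factor(-170) < 1.0  # compresses contrast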
| 285 | 1 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
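# NOTE (usage sketch): `slow_tokenizer_class` is the hook that ties a fast
# tokenizer to its slow counterpart, e.g. for converting a slow vocabulary when
# no tokenizer.json is available. The checkpoint path below is illustrative.
#
#     tokenizer = CustomTokenizerFast.from_pretrained("path/to/checkpoint")
#     assert CustomTokenizerFast.slow_tokenizer_class is CustomTokenizer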
| 158 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod.
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds.
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # DDIM does not rescale the model input; kept for scheduler API interchangeability.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_steps is a power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset
    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
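# NOTE (usage sketch): invert a clean latent toward noise by stepping through the
# inverted timesteps. `unet` below is a hypothetical epsilon-predicting model and
# the latent shape is illustrative; neither is part of the scheduler above.
#
#     scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=50)
#
#     sample = torch.randn(1, 4, 64, 64)           # latent to invert
#     for t in scheduler.timesteps:
#         model_output = unet(sample, t).sample    # hypothetical noise predictor
#         sample = scheduler.step(model_output, t, sample).prev_sample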
| 158 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model. So we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 71 |
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # prefix: 0, 94, +94 or 0094
        r"7(0|1|2|4|5|6|7|8)"  # operator code 7x
        r"(-| |)"  # optional separator
        r"\d{7}$"  # seven-digit subscriber number
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"

    print(is_sri_lankan_phone_number(phone))
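# NOTE (sketch): a few illustrative inputs against the pattern above; the
# expected results follow directly from the regex.
assert is_sri_lankan_phone_number("0094702343221") is True
assert is_sri_lankan_phone_number("+94712345678") is True
assert is_sri_lankan_phone_number("+94 71 2345678") is False  # separator only allowed after the 7x code
assert is_sri_lankan_phone_number("0732345678") is False  # 73 is not a valid operator code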
| 71 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes"
                        " when initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
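# NOTE (usage sketch): how these helpers pair up in a checkpointing loop, assuming
# an `accelerate` setup with an FSDP-wrapped `model`/`optimizer`; `fsdp_plugin`
# is taken from a configured Accelerator and all names here are illustrative.
#
#     fsdp_plugin = accelerator.state.fsdp_plugin
#
#     save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt", model_index=0)
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)
#
#     load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt", model_index=0)
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt", optimizer_index=0)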
| 16 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 16 | 1 |