| code (string, 82-53.2k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking for keywords in the first few lines of the file."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1- looking for keywords in the first few lines of the file.
    2- counting occurrences of the words 'config' and 'test' with respect to number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = dict()
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and has_no_keywords files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# (not sure this is the right place to save it)
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| code_codestyle: 463 |
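The script above delegates near-deduplication to `deduplicate_dataset` from `minhash_deduplication`, whose implementation is not shown. A minimal sketch of the underlying MinHash + LSH idea, written against the `datasketch` library — the whitespace shingling and threshold here are illustrative assumptions, not the script's actual settings:

# Illustrative MinHash-LSH near-duplicate lookup (not the minhash_deduplication module itself).
from datasketch import MinHash, MinHashLSH


def minhash_of(text, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for token in set(text.split()):  # naive whitespace shingles (assumption)
        m.update(token.encode("utf-8"))
    return m


docs = {
    "a": "def f(x): return x + 1",
    "b": "def f(x): return x + 2",  # near-duplicate of "a"
    "c": "print('hello world')",
}
lsh = MinHashLSH(threshold=0.5, num_perm=128)  # threshold ~ Jaccard similarity
for key, text in docs.items():
    lsh.insert(key, minhash_of(text))
print(lsh.query(minhash_of(docs["a"])))  # likely ["a", "b"]; LSH is probabilistic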
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """
    Implements a batch, differentiable, standard pinhole camera.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Return xy pixel coordinates of shape (height * width, 2)."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)
        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)
        return rays
    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """
        Creates a new camera for the resized view, assuming the aspect ratio does not change.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| style_context_codestyle: 463 | label: 1 |
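A short usage sketch for the camera module above; the shapes follow directly from the definitions (20 poses from `create_pan_cameras`, two entries per ray for origin and direction):

# Usage sketch: build the 20-pose pan cameras at 64x64 resolution and fetch rays.
cameras = create_pan_cameras(64)
rays = cameras.camera_rays  # property defined above
print(rays.shape)  # torch.Size([1, 20 * 64 * 64, 2, 3]) = [1, 81920, 2, 3]
print(rays[0, 0, 0], rays[0, 0, 1])  # origin and unit direction of the first ray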
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """
    Given an 'isbn/NNNN' or 'OLID/NNNN' style argument, return book data from
    the Open Library API as a dict.
    """
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a more human-readable summary dict."""
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
| code_codestyle: 475 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the ScoreSdeVeScheduler's step function.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance exploding stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input. Here it is a no-op.
        """
        return sample
    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        """
        Sets the continuous timesteps used for the diffusion chain (to be run before inference).
        """
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """
        Sets the noise scales used for the diffusion chain (to be run before inference).
        """
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )
    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        """
        Correct the predicted sample based on the output of the network. Often run repeatedly after making the
        prediction for the previous timestep.
        """
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| style_context_codestyle: 475 | label: 1 |
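A compressed sketch of the predictor-corrector sampling loop this scheduler is built for, mirroring diffusers' ScoreSdeVePipeline; `score_model` is a stand-in toy score function (an assumption, not a real network), so the loop structure is the point:

# Sketch of predictor-corrector sampling with the scheduler above.
import torch

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=100)
scheduler.set_sigmas(num_inference_steps=100)


def score_model(x, sigma):  # placeholder score network (assumption)
    return -x / sigma[:, None, None, None] ** 2


sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for i, t in enumerate(scheduler.timesteps):
    sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0])
    for _ in range(scheduler.config.correct_steps):  # corrector (Langevin) steps
        model_output = score_model(sample, sigma_t)
        sample = scheduler.step_correct(model_output, sample).prev_sample
    model_output = score_model(sample, sigma_t)  # predictor step
    out = scheduler.step_pred(model_output, t, sample)
    sample, sample_mean = out.prev_sample, out.prev_sample_mean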
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
        self.assertTrue(torch.allclose(tokens[0], EXPECTED_OUTPUT[0]))
        self.assertTrue(torch.allclose(tokens[1], EXPECTED_OUTPUT[1]))
        self.assertTrue(torch.allclose(tokens[2], EXPECTED_OUTPUT[2]))
| code_codestyle: 2 |
def base16_encode(data: bytes) -> str:
    """
    Encodes the given bytes into base16.

    >>> base16_encode(b'Hello World!')
    '48656C6C6F20576F726C6421'
    """
    # Turn each byte into its two-digit uppercase hexadecimal representation,
    # then join everything together.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """
    Decodes the given base16 encoded data into bytes.

    >>> base16_decode('48656C6C6F20576F726C6421')
    b'Hello World!'
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits."""
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters."""
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| style_context_codestyle: 2 | label: 1 |
"""simple docstring"""
def lowercase__ ( lowercase_ ) -> Dict:
"""simple docstring"""
stooge(SCREAMING_SNAKE_CASE_ ,0 ,len(SCREAMING_SNAKE_CASE_ ) - 1 )
return arr
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ) -> Tuple:
"""simple docstring"""
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
_UpperCamelCase, _UpperCamelCase : Any = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
_UpperCamelCase : str = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,(h - t) )
# Recursively sort last 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ ,i + t ,(SCREAMING_SNAKE_CASE_) )
# Recursively sort first 2/3 elements
stooge(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,(h - t) )
if __name__ == "__main__":
lowerCamelCase__ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| code_codestyle: 715 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """
    Hash map with open addressing.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """
        Try to add the value to the bucket.

        If the bucket is empty or holds the same key, insert and return True.
        If the bucket holds another key or a deleted placeholder, return False
        so the caller probes the next bucket.
        """
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        """Return true if we have reached safe capacity."""
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        """Return true if we need twice fewer buckets than we have now."""
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)
    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| style_context_codestyle: 51 | label: 0 |
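A short usage sketch exercising insertion, overwrite, deletion (tombstoning with the `_deleted` sentinel), and the `MutableMapping` protocol of the `HashMap` above:

# Usage sketch for the open-addressing HashMap.
hm = HashMap()
hm["apple"] = 1
hm["banana"] = 2
hm["apple"] = 3  # overwrite in place; length stays 2
assert len(hm) == 2 and hm["apple"] == 3
del hm["banana"]  # slot becomes a _deleted tombstone, probing still works
assert "banana" not in hm  # __contains__ comes free from MutableMapping
print(hm)  # HashMap(apple: 3)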
"""simple docstring"""
from math import isqrt
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
return all(number % divisor != 0 for divisor in range(2 , isqrt(__UpperCamelCase ) + 1 ) )
def lowerCAmelCase ( __UpperCamelCase = 10**6 ):
'''simple docstring'''
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : Optional[Any] = 7
while prime_candidate < max_prime:
primes_count += is_prime(__UpperCamelCase )
cube_index += 1
prime_candidate += 6 * cube_index
return primes_count
if __name__ == "__main__":
print(F"{solution() = }")
| code_codestyle: 65 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Optional[Any]:
'''simple docstring'''
a : Any = []
a : List[str] = set({"(", "[", "{"} )
a : int = set({")", "]", "}"} )
a : int = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_lowercase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowercase ) == 0 or (len(_lowercase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowercase ) == 0
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
a : Any = input("Enter sequence of brackets: " )
if is_balanced(_lowercase ):
print(_lowercase , "is balanced" )
else:
print(_lowercase , "is not balanced" )
if __name__ == "__main__":
main()
| style_context_codestyle: 633 | label: 0 |
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and emoji file into a dictionary."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """
    Tokenizer for GPT-NeoX-Japanese, based on a sub-word tokenizer that handles
    Japanese-specific character variation and emoji.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )
    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    """
    Sub-word tokenizer for Japanese that normalizes text, maps emoji, and falls
    back to byte-level tokens for unknown characters.
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")  # full-width space
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
| code_codestyle: 544 |
"""simple docstring"""
lowerCAmelCase__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| style_context_codestyle: 544 | label: 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| code_codestyle: 124 |
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n using the sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
| style_context_codestyle: 285 | label: 0 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| code_codestyle: 314 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 314 | label: 1 |
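The `_LazyModule` pattern above defers the heavy torch-dependent imports until an attribute is actually accessed. A brief usage sketch of the public API this module exposes (standard transformers usage):

# Usage sketch: attribute access on the package triggers the lazy import.
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24)
print(config.prediction_length)  # 24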
'''Tests for the MobileViT image processor.'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(a_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCamelCase = image_processing(a_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 430 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
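
# Illustrative usage sketch (not part of the original file): the convolutional
# feature encoder downsamples the raw waveform by the product of `conv_stride`,
# which `inputs_to_logits_ratio` exposes. With the defaults above that is
# 5 * 2 * 2 * 2 * 2 * 2 * 2 = 320 input samples per output frame.
if __name__ == "__main__":
    config = WavLMConfig()
    assert config.inputs_to_logits_ratio == 320
    print(f"{config.inputs_to_logits_ratio} waveform samples per logit frame")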
| 193 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_flip_channel_order: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
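
# Minimal usage sketch (not part of the original file; assumes Pillow and torch
# are installed). With the defaults, the shortest edge is resized to 224, the
# image is center-cropped to 256x256, rescaled to [0, 1], and the channels are
# flipped from RGB to BGR before batching.
if __name__ == "__main__" and is_vision_available() and is_torch_available():
    demo_image = PIL.Image.new("RGB", (320, 480))
    processor = MobileViTImageProcessor()
    pixel_values = processor(images=demo_image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # expected: torch.Size([1, 3, 256, 256])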
| 260 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    # Map every utf-8 byte to a printable unicode string, avoiding the
    # whitespace/control characters that byte-level BPE cannot handle directly.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (given as a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
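
# Short illustration (not part of the original file): `bytes_to_unicode` maps all
# 256 byte values to printable unicode characters, and `get_pairs` enumerates the
# adjacent symbol pairs that BPE merge ranks are looked up against.
# >>> len(bytes_to_unicode())
# 256
# >>> sorted(get_pairs(("l", "o", "w")))
# [('l', 'o'), ('o', 'w')]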
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
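
# Hedged usage sketch (not part of the original file; assumes the
# `allenai/led-base-16384` checkpoint is reachable). `_pad` above keeps a
# user-supplied `global_attention_mask` aligned with the padded `input_ids`,
# filling the padding positions with -1.
if __name__ == "__main__":
    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    enc = tokenizer("Hello world")
    enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)  # global attention on <s>
    padded = tokenizer.pad(enc, padding="max_length", max_length=10)
    print(padded["global_attention_mask"])  # trailing positions padded with -1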
| 260 | 1 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a French model, so we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 24 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"]
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
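
# Small usage sketch (not part of the original file): the synthetic dataset yields
# dict batches whose "x" entries feed the regression models above directly.
if __name__ == "__main__":
    dataset = RegressionDataset(length=8, seed=42)
    loader = DataLoader(dataset, batch_size=4)
    model = RegressionModel(a=2, b=3)
    for batch in loader:
        print(model(batch["x"]).shape)  # torch.Size([4])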
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwinForImageClassification',
'SwinForMaskedImageModeling',
'SwinModel',
'SwinPreTrainedModel',
'SwinBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSwinForImageClassification',
'TFSwinForMaskedImageModeling',
'TFSwinModel',
'TFSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
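
# Usage note (not part of the original file): with the `_LazyModule` indirection,
# `from transformers.models.swin import SwinModel` only imports `modeling_swin`
# (and torch with it) at attribute-access time, so a bare `import transformers`
# stays cheap even when optional backends are missing.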
| 112 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 112 | 1 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    # Liouville's lambda function: 1 when `number` has an even number of prime
    # factors (counted with multiplicity), -1 when it has an odd number.
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
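
# Illustrative values (not part of the original file): lambda(n) = (-1) ** Omega(n),
# where Omega counts prime factors with multiplicity.
#   liouville_lambda(10) == 1    # 10 = 2 * 5 -> even number of factors
#   liouville_lambda(12) == -1   # 12 = 2 * 2 * 3 -> odd number of factors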
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        logger.info("initializing retrieval")

        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
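
# Hedged usage sketch (not part of the original file; the real training entry point
# lives in the accompanying RAG fine-tuning script). Retrieval workers are plain Ray
# actors wrapping `RayRetriever`:
#
#     import ray
#     ray.init()
#     workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
#     retriever = RagRayDistributedRetriever.from_pretrained(
#         "facebook/rag-token-nq", actor_handles=workers
#     )
#     retriever.init_retrieval()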
| 688 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
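
# Quick illustration (not part of the original file): for a node at (0, 0) with
# goal (6, 6), HEURISTIC = 0 gives the euclidean estimate sqrt(72) ~ 8.49, while
# HEURISTIC = 1 gives the manhattan estimate 12.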
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 710 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 688 | 0 |
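# --- Illustrative sketch (not the Hugging Face implementation) of the two switches
# exercised by the BasicTokenizer tests above: `do_lower_case` lowercases the text,
# and `strip_accents` removes combining marks via Unicode NFD decomposition. That is
# why "HäLLo" tokenizes to "hällo" (lowercase only), "hallo" (both), "HaLLo"
# (strip only), or is left untouched. The helper name below is ours.
import unicodedata


def normalize(text: str, do_lower_case: bool, strip_accents: bool) -> str:
    if do_lower_case:
        text = text.lower()
    if strip_accents:
        text = "".join(
            ch
            for ch in unicodedata.normalize("NFD", text)
            if unicodedata.category(ch) != "Mn"  # drop combining accent marks
        )
    return text


assert normalize("HäLLo", do_lower_case=True, strip_accents=False) == "hällo"
assert normalize("HäLLo", do_lower_case=True, strip_accents=True) == "hallo"
assert normalize("HäLLo", do_lower_case=False, strip_accents=True) == "HaLLo"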
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class A ( __lowercase ):
_snake_case =42
class A ( nn.Module ):
def __init__( self: Optional[Any] , _lowerCAmelCase: Optional[Any]=3 , _lowerCAmelCase: str=3 , _lowerCAmelCase: Tuple=("DownEncoderBlock2D",) , _lowerCAmelCase: Optional[int]=(64,) , _lowerCAmelCase: List[Any]=2 , _lowerCAmelCase: Optional[Any]=32 , _lowerCAmelCase: List[Any]="silu" , _lowerCAmelCase: str=True , ) -> Tuple:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ =layers_per_block
        UpperCAmelCase_ =torch.nn.Conv2d(
_lowerCAmelCase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ =None
UpperCAmelCase_ =nn.ModuleList([] )
# down
UpperCAmelCase_ =block_out_channels[0]
for i, down_block_type in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =output_channel
UpperCAmelCase_ =block_out_channels[i]
UpperCAmelCase_ =i == len(_lowerCAmelCase ) - 1
UpperCAmelCase_ =get_down_block(
_lowerCAmelCase , num_layers=self.layers_per_block , in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=_lowerCAmelCase , resnet_groups=_lowerCAmelCase , attention_head_dim=_lowerCAmelCase , temb_channels=_lowerCAmelCase , )
self.down_blocks.append(_lowerCAmelCase )
# mid
        UpperCAmelCase_ =UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_lowerCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowerCAmelCase , temb_channels=_lowerCAmelCase , )
# out
UpperCAmelCase_ =nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_lowerCAmelCase , eps=1e-6 )
UpperCAmelCase_ =nn.SiLU()
UpperCAmelCase_ =2 * out_channels if double_z else out_channels
        UpperCAmelCase_ =nn.Conv2d(block_out_channels[-1] , _lowerCAmelCase , 3 , padding=1 )
UpperCAmelCase_ =False
def lowerCAmelCase__ ( self: Tuple , _lowerCAmelCase: int ) -> int:
'''simple docstring'''
UpperCAmelCase_ =x
UpperCAmelCase_ =self.conv_in(_lowerCAmelCase )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_lowerCAmelCase: Optional[Any] ):
def custom_forward(*_lowerCAmelCase: Union[str, Any] ):
return module(*_lowerCAmelCase )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
UpperCAmelCase_ =torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowerCAmelCase ) , _lowerCAmelCase , use_reentrant=_lowerCAmelCase )
# middle
UpperCAmelCase_ =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowerCAmelCase , use_reentrant=_lowerCAmelCase )
else:
for down_block in self.down_blocks:
UpperCAmelCase_ =torch.utils.checkpoint.checkpoint(create_custom_forward(_lowerCAmelCase ) , _lowerCAmelCase )
# middle
UpperCAmelCase_ =torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , _lowerCAmelCase )
else:
# down
for down_block in self.down_blocks:
UpperCAmelCase_ =down_block(_lowerCAmelCase )
# middle
UpperCAmelCase_ =self.mid_block(_lowerCAmelCase )
# post-process
UpperCAmelCase_ =self.conv_norm_out(_lowerCAmelCase )
UpperCAmelCase_ =self.conv_act(_lowerCAmelCase )
UpperCAmelCase_ =self.conv_out(_lowerCAmelCase )
return sample
class A ( nn.Module ):
def __init__( self: Union[str, Any] , _lowerCAmelCase: Dict=3 , _lowerCAmelCase: List[str]=3 , _lowerCAmelCase: int=("UpDecoderBlock2D",) , _lowerCAmelCase: Optional[int]=(64,) , _lowerCAmelCase: Tuple=2 , _lowerCAmelCase: Dict=32 , _lowerCAmelCase: Tuple="silu" , _lowerCAmelCase: str="group" , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ =layers_per_block
        UpperCAmelCase_ =nn.Conv2d(
_lowerCAmelCase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ =None
UpperCAmelCase_ =nn.ModuleList([] )
UpperCAmelCase_ =in_channels if norm_type == "spatial" else None
# mid
        UpperCAmelCase_ =UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=_lowerCAmelCase , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowerCAmelCase , temb_channels=_lowerCAmelCase , )
# up
UpperCAmelCase_ =list(reversed(_lowerCAmelCase ) )
UpperCAmelCase_ =reversed_block_out_channels[0]
for i, up_block_type in enumerate(_lowerCAmelCase ):
UpperCAmelCase_ =output_channel
UpperCAmelCase_ =reversed_block_out_channels[i]
UpperCAmelCase_ =i == len(_lowerCAmelCase ) - 1
UpperCAmelCase_ =get_up_block(
_lowerCAmelCase , num_layers=self.layers_per_block + 1 , in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , prev_output_channel=_lowerCAmelCase , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=_lowerCAmelCase , resnet_groups=_lowerCAmelCase , attention_head_dim=_lowerCAmelCase , temb_channels=_lowerCAmelCase , resnet_time_scale_shift=_lowerCAmelCase , )
self.up_blocks.append(_lowerCAmelCase )
UpperCAmelCase_ =output_channel
# out
if norm_type == "spatial":
UpperCAmelCase_ =SpatialNorm(block_out_channels[0] , _lowerCAmelCase )
else:
UpperCAmelCase_ =nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_lowerCAmelCase , eps=1e-6 )
UpperCAmelCase_ =nn.SiLU()
        UpperCAmelCase_ =nn.Conv2d(block_out_channels[0] , _lowerCAmelCase , 3 , padding=1 )
UpperCAmelCase_ =False
def lowerCAmelCase__ ( self: List[Any] , _lowerCAmelCase: int , _lowerCAmelCase: Optional[int]=None ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =z
UpperCAmelCase_ =self.conv_in(_lowerCAmelCase )
UpperCAmelCase_ =next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_lowerCAmelCase: Dict ):
def custom_forward(*_lowerCAmelCase: Union[str, Any] ):
return module(*_lowerCAmelCase )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
UpperCAmelCase_ =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowerCAmelCase , _lowerCAmelCase , use_reentrant=_lowerCAmelCase )
UpperCAmelCase_ =sample.to(_lowerCAmelCase )
# up
for up_block in self.up_blocks:
UpperCAmelCase_ =torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase , use_reentrant=_lowerCAmelCase )
else:
# middle
UpperCAmelCase_ =torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , _lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =sample.to(_lowerCAmelCase )
# up
for up_block in self.up_blocks:
UpperCAmelCase_ =torch.utils.checkpoint.checkpoint(create_custom_forward(_lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase )
else:
# middle
UpperCAmelCase_ =self.mid_block(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =sample.to(_lowerCAmelCase )
# up
for up_block in self.up_blocks:
UpperCAmelCase_ =up_block(_lowerCAmelCase , _lowerCAmelCase )
# post-process
if latent_embeds is None:
UpperCAmelCase_ =self.conv_norm_out(_lowerCAmelCase )
else:
UpperCAmelCase_ =self.conv_norm_out(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =self.conv_act(_lowerCAmelCase )
UpperCAmelCase_ =self.conv_out(_lowerCAmelCase )
return sample
class A ( nn.Module ):
def __init__( self: Tuple , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Tuple , _lowerCAmelCase: int=None , _lowerCAmelCase: Union[str, Any]="random" , _lowerCAmelCase: Optional[int]=False , _lowerCAmelCase: Union[str, Any]=True ) -> str:
'''simple docstring'''
super().__init__()
UpperCAmelCase_ =n_e
UpperCAmelCase_ =vq_embed_dim
UpperCAmelCase_ =beta
UpperCAmelCase_ =legacy
UpperCAmelCase_ =nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
UpperCAmelCase_ =remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
UpperCAmelCase_ =self.used.shape[0]
UpperCAmelCase_ =unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCAmelCase_ =self.re_embed
UpperCAmelCase_ =self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
UpperCAmelCase_ =n_e
UpperCAmelCase_ =sane_index_shape
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: Any ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =inds.shape
assert len(_lowerCAmelCase ) > 1
UpperCAmelCase_ =inds.reshape(ishape[0] , -1 )
UpperCAmelCase_ =self.used.to(_lowerCAmelCase )
UpperCAmelCase_ =(inds[:, :, None] == used[None, None, ...]).long()
UpperCAmelCase_ =match.argmax(-1 )
UpperCAmelCase_ =match.sum(2 ) < 1
if self.unknown_index == "random":
UpperCAmelCase_ =torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
UpperCAmelCase_ =self.unknown_index
return new.reshape(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Tuple ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ =inds.shape
assert len(_lowerCAmelCase ) > 1
UpperCAmelCase_ =inds.reshape(ishape[0] , -1 )
UpperCAmelCase_ =self.used.to(_lowerCAmelCase )
if self.re_embed > self.used.shape[0]: # extra token
UpperCAmelCase_ =0 # simply set to zero
UpperCAmelCase_ =torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _lowerCAmelCase )
return back.reshape(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =z.permute(0 , 2 , 3 , 1 ).contiguous()
UpperCAmelCase_ =z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCAmelCase_ =torch.argmin(torch.cdist(_lowerCAmelCase , self.embedding.weight ) , dim=1 )
UpperCAmelCase_ =self.embedding(_lowerCAmelCase ).view(z.shape )
UpperCAmelCase_ =None
UpperCAmelCase_ =None
# compute loss for embedding
if not self.legacy:
UpperCAmelCase_ =self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
UpperCAmelCase_ =torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
UpperCAmelCase_ =z + (z_q - z).detach()
# reshape back to match original input shape
UpperCAmelCase_ =z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
UpperCAmelCase_ =min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
UpperCAmelCase_ =self.remap_to_used(_lowerCAmelCase )
UpperCAmelCase_ =min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
UpperCAmelCase_ =min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCAmelCase__ ( self: List[str] , _lowerCAmelCase: Dict , _lowerCAmelCase: Dict ) -> Union[str, Any]:
'''simple docstring'''
if self.remap is not None:
UpperCAmelCase_ =indices.reshape(shape[0] , -1 ) # add batch axis
UpperCAmelCase_ =self.unmap_to_all(_lowerCAmelCase )
UpperCAmelCase_ =indices.reshape(-1 ) # flatten again
# get quantized latent vectors
UpperCAmelCase_ =self.embedding(_lowerCAmelCase )
if shape is not None:
UpperCAmelCase_ =z_q.view(_lowerCAmelCase )
# reshape back to match original input shape
UpperCAmelCase_ =z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class A ( __lowercase ):
def __init__( self: Any , _lowerCAmelCase: Optional[Any] , _lowerCAmelCase: Union[str, Any]=False ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ =parameters
UpperCAmelCase_ , UpperCAmelCase_ =torch.chunk(_lowerCAmelCase , 2 , dim=1 )
UpperCAmelCase_ =torch.clamp(self.logvar , -30.0 , 20.0 )
UpperCAmelCase_ =deterministic
UpperCAmelCase_ =torch.exp(0.5 * self.logvar )
UpperCAmelCase_ =torch.exp(self.logvar )
if self.deterministic:
UpperCAmelCase_ =UpperCAmelCase_ =torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: Optional[torch.Generator] = None ) -> torch.FloatTensor:
'''simple docstring'''
UpperCAmelCase_ =randn_tensor(
self.mean.shape , generator=_lowerCAmelCase , device=self.parameters.device , dtype=self.parameters.dtype )
UpperCAmelCase_ =self.mean + self.std * sample
return x
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: str=None ) -> Tuple:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCAmelCase__ ( self: int , _lowerCAmelCase: List[Any] , _lowerCAmelCase: List[Any]=[1, 2, 3] ) -> Union[str, Any]:
'''simple docstring'''
if self.deterministic:
return torch.Tensor([0.0] )
UpperCAmelCase_ =np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] ) -> str:
'''simple docstring'''
return self.mean
| 54 |
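# --- Minimal sketch of the vector-quantization step implemented above, assuming a
# plain (n_e, d) codebook tensor rather than an nn.Embedding: snap each latent to
# its nearest codebook row, add the two VQ-VAE loss terms (weighted as in the
# legacy branch above), and copy gradients straight through so the encoder stays
# trainable. The function name is ours, not part of the module.
import torch


def quantize(z_flat: torch.Tensor, codebook: torch.Tensor, beta: float = 0.25):
    # z_flat: (N, d) latents; codebook: (n_e, d) embedding rows
    indices = torch.cdist(z_flat, codebook).argmin(dim=1)  # nearest neighbour per latent
    z_q = codebook[indices]  # quantized latents
    loss = torch.mean((z_q.detach() - z_flat) ** 2) + beta * torch.mean(
        (z_q - z_flat.detach()) ** 2
    )  # commitment term + beta-weighted codebook term
    z_q = z_flat + (z_q - z_flat).detach()  # straight-through estimator
    return z_q, loss, indices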
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A ( __lowercase , unittest.TestCase ):
_snake_case =CanineTokenizer
_snake_case =False
def lowerCAmelCase__ ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
super().setUp()
UpperCAmelCase_ =CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCAmelCase__ ( self: Optional[int] ) -> List[str]:
'''simple docstring'''
return CanineTokenizer.from_pretrained("google/canine-s" )
def lowerCAmelCase__ ( self: Union[str, Any] , **_lowerCAmelCase: List[Any] ) -> CanineTokenizer:
'''simple docstring'''
UpperCAmelCase_ =self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCAmelCase )
UpperCAmelCase_ =1024
return tokenizer
@require_torch
def lowerCAmelCase__ ( self: int ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ =[5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCAmelCase__ ( self: int ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ =tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" , _lowerCAmelCase )
self.assertIn("attention_mask" , _lowerCAmelCase )
self.assertIn("token_type_ids" , _lowerCAmelCase )
@require_torch
def lowerCAmelCase__ ( self: str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.canine_tokenizer
UpperCAmelCase_ =[
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ =tokenizer(
text_target=_lowerCAmelCase , max_length=32 , padding="max_length" , truncation=_lowerCAmelCase , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
shutil.rmtree(_lowerCAmelCase )
UpperCAmelCase_ =self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ =tempfile.mkdtemp()
UpperCAmelCase_ =" He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ =tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ =chr(0xe0_07 )
additional_special_tokens.append(_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
tokenizer.save_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase )
UpperCAmelCase_ =after_tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
self.assertIn(_lowerCAmelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ =tokenizer.__class__.from_pretrained(_lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ , UpperCAmelCase_ =self.get_clean_sequence(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_05
UpperCAmelCase_ =chr(_lowerCAmelCase )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
UpperCAmelCase_ =tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , input_encoded + special_token_id )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase )
self.assertTrue(special_token not in decoded )
def lowerCAmelCase__ ( self: Any ) -> Any:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =chr(0xe0_05 )
UpperCAmelCase_ =chr(0xe0_06 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCAmelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.tokenize(_lowerCAmelCase )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(len(_lowerCAmelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCAmelCase )
self.assertEqual(token_a[0] , _lowerCAmelCase )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCAmelCase )
tokenizer.from_pretrained(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ =[]
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ =json.load(_lowerCAmelCase )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
UpperCAmelCase_ =[new_token_a]
UpperCAmelCase_ =[new_token_a]
with open(os.path.join(_lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
with open(os.path.join(_lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(_lowerCAmelCase , _lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ =tokenizer_class.from_pretrained(_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
UpperCAmelCase_ =0xe0_07
UpperCAmelCase_ =chr(_lowerCAmelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ =[AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase )]
UpperCAmelCase_ =tokenizer_class.from_pretrained(
_lowerCAmelCase , additional_special_tokens=_lowerCAmelCase , extra_ids=0 )
self.assertIn(_lowerCAmelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers(do_lower_case=_lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ ="hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ ="[CLS] hello world [SEP]"
else:
UpperCAmelCase_ =input
UpperCAmelCase_ =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase )
UpperCAmelCase_ =tokenizer.decode(_lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCAmelCase , [output, output.lower()] )
def lowerCAmelCase__ ( self: List[str] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
UpperCAmelCase_ =[
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ ="a"
UpperCAmelCase_ =ord(_lowerCAmelCase )
for attr in attributes_list:
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , attr + "_id" , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
self.assertEqual(getattr(_lowerCAmelCase , attr + "_id" ) , _lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [] )
UpperCAmelCase_ =0xe0_06
UpperCAmelCase_ =chr(_lowerCAmelCase )
setattr(_lowerCAmelCase , "additional_special_tokens_ids" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCAmelCase , "additional_special_tokens_ids" ) , [additional_special_token_id] )
def lowerCAmelCase__ ( self: List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Dict ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Any ) -> List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: Tuple ) -> Union[str, Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self: str ) -> str:
'''simple docstring'''
pass
| 54 | 1 |
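# --- Rough sketch of CANINE-style tokenization, matching the convention visible in
# the expected ids above: every character maps to its Unicode code point, with
# special tokens drawn from the private-use area (57344 == 0xE000 for [CLS],
# 57345 == 0xE001 for [SEP]) and 0 used for padding. The helper name is ours.
CLS, SEP, PAD = 0xE000, 0xE001, 0


def encode_chars(text: str, max_length: int) -> list[int]:
    ids = [CLS] + [ord(ch) for ch in text] + [SEP]
    return ids + [PAD] * (max_length - len(ids))  # right-pad to a fixed length


assert encode_chars("Hi", max_length=6) == [0xE000, ord("H"), ord("i"), 0xE001, 0, 0]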
def find_min(arr: list[int]) -> int:
    """Split arr into two subsets whose sums are as close as possible and
    return the minimum difference between the two sums."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for _ in range(s + 1)] for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]
    diff = 0
    # The best split puts a subset sum as close to s / 2 as possible on one side.
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            diff = s - 2 * j
            break
    return diff
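# Quick sanity checks: [1, 6, 11, 5] splits into {1, 6, 5} vs {11}, a difference
# of 1, and that is the best possible split.
assert find_min([1, 6, 11, 5]) == 1
assert find_min([3, 1, 4, 2, 2, 1]) == 1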
| 321 |
from __future__ import annotations


def comp_and_swap(array: list[int], index_a: int, index_b: int, direction: int) -> None:
    """Swap array[index_a] and array[index_b] if they violate the direction
    (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merge a bitonic sequence of the given length into sorted order."""
    if length > 1:
        middle = length // 2
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sort array[low : low + length] (length must be a power of two): build a
    bitonic sequence from an ascending half and a descending half, then merge."""
    if length > 1:
        middle = length // 2
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
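# Minimal non-interactive check of the network above; like the driver, it assumes
# the input length is a power of two (the classic bitonic-network constraint).
if __name__ == "__main__":
    data = [12, 42, -21, 1]
    bitonic_sort(data, 0, len(data), 1)
    assert data == [-21, 1, 12, 42]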
| 321 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def A_ ( snake_case , snake_case=False ):
SCREAMING_SNAKE_CASE:Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
SCREAMING_SNAKE_CASE:List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def A_ ( snake_case , snake_case , snake_case=False ):
for i in range(config.num_hidden_layers ):
if base_model:
SCREAMING_SNAKE_CASE:Any = ""
else:
SCREAMING_SNAKE_CASE:str = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
SCREAMING_SNAKE_CASE:List[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
SCREAMING_SNAKE_CASE:int = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE:Optional[Any] = in_proj_weight[
: config.hidden_size, :
]
SCREAMING_SNAKE_CASE:Tuple = in_proj_bias[: config.hidden_size]
SCREAMING_SNAKE_CASE:Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
SCREAMING_SNAKE_CASE:Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
SCREAMING_SNAKE_CASE:Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
SCREAMING_SNAKE_CASE:List[str] = in_proj_bias[-config.hidden_size :]
def A_ ( snake_case ):
SCREAMING_SNAKE_CASE:List[Any] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(snake_case , snake_case )
def A_ ( snake_case , snake_case , snake_case ):
SCREAMING_SNAKE_CASE:Union[str, Any] = dct.pop(snake_case )
SCREAMING_SNAKE_CASE:int = val
def A_ ( ):
SCREAMING_SNAKE_CASE:Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
SCREAMING_SNAKE_CASE:int = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def A_ ( snake_case , snake_case , snake_case=True ):
SCREAMING_SNAKE_CASE:Optional[int] = ViTConfig()
# patch_size
if model_name[-1] == "8":
SCREAMING_SNAKE_CASE:Tuple = 8
# set labels if required
if not base_model:
SCREAMING_SNAKE_CASE:Union[str, Any] = 1000
SCREAMING_SNAKE_CASE:str = "huggingface/label-files"
SCREAMING_SNAKE_CASE:Tuple = "imagenet-1k-id2label.json"
SCREAMING_SNAKE_CASE:Any = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
SCREAMING_SNAKE_CASE:Optional[Any] = {int(snake_case ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE:Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE:Optional[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
SCREAMING_SNAKE_CASE:str = 384
SCREAMING_SNAKE_CASE:Any = 1536
SCREAMING_SNAKE_CASE:Union[str, Any] = 12
SCREAMING_SNAKE_CASE:str = 6
# load original model from torch hub
SCREAMING_SNAKE_CASE:int = torch.hub.load("facebookresearch/dino:main" , snake_case )
original_model.eval()
# load state_dict of original model, remove and rename some keys
SCREAMING_SNAKE_CASE:List[str] = original_model.state_dict()
if base_model:
remove_classification_head_(snake_case )
SCREAMING_SNAKE_CASE:str = create_rename_keys(snake_case , base_model=snake_case )
for src, dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
read_in_q_k_v(snake_case , snake_case , snake_case )
# load HuggingFace model
if base_model:
SCREAMING_SNAKE_CASE:Any = ViTModel(snake_case , add_pooling_layer=snake_case ).eval()
else:
SCREAMING_SNAKE_CASE:Dict = ViTForImageClassification(snake_case ).eval()
model.load_state_dict(snake_case )
# Check outputs on an image, prepared by ViTImageProcessor
SCREAMING_SNAKE_CASE:List[str] = ViTImageProcessor()
SCREAMING_SNAKE_CASE:Optional[Any] = image_processor(images=prepare_img() , return_tensors="pt" )
SCREAMING_SNAKE_CASE:str = encoding["pixel_values"]
SCREAMING_SNAKE_CASE:str = model(snake_case )
if base_model:
SCREAMING_SNAKE_CASE:Dict = original_model(snake_case )
assert torch.allclose(snake_case , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
else:
SCREAMING_SNAKE_CASE:str = original_model(snake_case )
assert logits.shape == outputs.logits.shape
assert torch.allclose(snake_case , outputs.logits , atol=1e-3 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
A_ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
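# --- Standalone sketch of the q/k/v surgery performed in read_in_q_k_v above: timm
# stores one fused (3*h, h) input projection, which is sliced into query, key and
# value blocks of shape (h, h). The function name and toy hidden size are ours.
import torch


def split_qkv(in_proj_weight: torch.Tensor, in_proj_bias: torch.Tensor, hidden_size: int):
    q_w = in_proj_weight[:hidden_size, :]
    k_w = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v_w = in_proj_weight[-hidden_size:, :]
    q_b = in_proj_bias[:hidden_size]
    k_b = in_proj_bias[hidden_size : 2 * hidden_size]
    v_b = in_proj_bias[-hidden_size:]
    return (q_w, q_b), (k_w, k_b), (v_w, v_b)


if __name__ == "__main__":
    q, k, v = split_qkv(torch.randn(24, 8), torch.randn(24), hidden_size=8)
    assert q[0].shape == (8, 8) and v[1].shape == (8,)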
| 143 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase: List[Any] = logging.get_logger(__name__)
_lowercase: int = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class lowerCamelCase__ ( UpperCAmelCase ):
UpperCamelCase__ ="mgp-str"
def __init__( self : int , lowercase__ : List[Any]=[32, 1_28] , lowercase__ : str=4 , lowercase__ : List[str]=3 , lowercase__ : Tuple=27 , lowercase__ : Optional[Any]=38 , lowercase__ : Optional[Any]=5_02_57 , lowercase__ : Dict=3_05_22 , lowercase__ : Dict=7_68 , lowercase__ : Tuple=12 , lowercase__ : int=12 , lowercase__ : int=4.0 , lowercase__ : Tuple=True , lowercase__ : List[Any]=False , lowercase__ : int=1e-5 , lowercase__ : int=0.0 , lowercase__ : Optional[Any]=0.0 , lowercase__ : List[Any]=0.0 , lowercase__ : Union[str, Any]=False , lowercase__ : str=0.0_2 , **lowercase__ : Optional[int] , ):
super().__init__(**lowercase__ )
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = max_token_length
_lowerCAmelCase = num_character_labels
_lowerCAmelCase = num_bpe_labels
_lowerCAmelCase = num_wordpiece_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = mlp_ratio
_lowerCAmelCase = distilled
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = drop_rate
_lowerCAmelCase = qkv_bias
_lowerCAmelCase = attn_drop_rate
_lowerCAmelCase = drop_path_rate
_lowerCAmelCase = output_aa_attentions
_lowerCAmelCase = initializer_range
| 192 | 0 |
"""Sum all node values of a binary tree with a depth-first traversal."""
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
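# Small usage example for the traversal above (a hypothetical tree, not taken from
# the original tests): 10 + 5 + (-3) = 12.
if __name__ == "__main__":
    root = Node(10)
    root.left, root.right = Node(5), Node(-3)
    assert next(iter(BinaryTreeNodeSum(root))) == 12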
| 574 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
_lowercase : List[Any] =TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
_lowercase : List[Any] =[]
_lowercase : List[str] =[]
_lowercase : Tuple ={"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
_lowercase : Union[str, Any] =[
{
"type": "header",
"text": {
"type": "plain_text",
"text": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"emoji": True,
},
}
]
_lowercase : int =0
for log in Path().glob("*.log"):
_lowercase : Tuple =0
with open(log, "r") as f:
for line in f:
_lowercase : str =json.loads(line)
if line.get("nodeid", "") != "":
_lowercase : List[Any] =line["nodeid"]
if line.get("duration", None) is not None:
_lowercase : Optional[int] =F"{line['duration']:.4f}"
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
_lowercase : Optional[int] =[]
log.unlink()
_lowercase : Optional[int] =""
_lowercase : List[Any] =[]
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
_lowercase : Dict =[]
_lowercase : Dict ={}
for test in failed_tests:
_lowercase : Union[str, Any] =test[0].split("::")
_lowercase : Dict =data[0].split("/")[-1]
if data[0] not in filesafailed:
_lowercase : Union[str, Any] =[data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
_lowercase : Dict =[test[0] for test in failed_table]
_lowercase : Dict =list(set(files))
# Count number of instances in failed_tests
_lowercase : Tuple =[]
for file in individual_files:
table.append([file, len(filesafailed[file])])
_lowercase : Dict =tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
_lowercase : Tuple ="Too many failed tests, please see the full report in the Action results."
_lowercase : Optional[int] =len(err) + 10
_lowercase : str =message[: 3000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
_lowercase : Union[str, Any] ="No failed tests! 🤗"
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
_lowercase : List[str] =WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
_lowercase : Optional[Any] ={
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
_lowercase : Union[str, Any] ={
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
_lowercase : Tuple ={
"type": "context",
"elements": [
{
"type": "plain_text",
"text": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
_lowercase : List[str] =client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
_lowercase : int =response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
_lowercase : Tuple =""
for i, row in enumerate(test_failures):
if row[0] != test_class:
_lowercase : List[str] =row[0]
else:
_lowercase : int =""
_lowercase : Tuple ={
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
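# Note: the `len(message) > 3000` guard earlier in this script tracks Slack's Block
# Kit limit of 3000 characters for a section's text field; longer reports are
# truncated and readers are pointed at the full Action results instead.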
| 574 | 1 |
from timeit import timeit

test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    """Two-pointer check: walk inward from both ends."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    """Compare each character in the first half with its mirror."""
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    """Strip a matching pair of end characters per call. The base case must be
    len(s) <= 1, not <= 2: a two-character string still needs its ends compared."""
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    """Compare the string with its reverse slice."""
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
| 37 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
__magic_name__ : str = ProphetNetTokenizer
__magic_name__ : Dict = False
def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().setUp()
__UpperCamelCase : Optional[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__UpperCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def lowerCamelCase__ ( self : Tuple , lowerCAmelCase : int ) -> List[str]:
"""simple docstring"""
__UpperCamelCase : Union[str, Any] = """UNwant\u00E9d,running"""
__UpperCamelCase : List[str] = """unwanted, running"""
return input_text, output_text
def lowerCamelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__UpperCamelCase : Optional[int] = self.tokenizer_class(self.vocab_file )
__UpperCamelCase : List[str] = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(lowerCAmelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [9, 6, 7, 12, 10, 11] )
def lowerCamelCase__ ( self : str ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase : List[str] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCamelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
__UpperCamelCase : Any = BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
__UpperCamelCase : List[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def lowerCamelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__UpperCamelCase : Dict = BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__UpperCamelCase : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__UpperCamelCase : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__UpperCamelCase : List[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase , strip_accents=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__UpperCamelCase : int = BasicTokenizer(do_lower_case=lowerCAmelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__UpperCamelCase : int = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__UpperCamelCase : List[Any] = {}
for i, token in enumerate(lowerCAmelCase ):
__UpperCamelCase : Dict = i
__UpperCamelCase : Any = WordpieceTokenizer(vocab=lowerCAmelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
@require_torch
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__UpperCamelCase : str = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__UpperCamelCase : int = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__UpperCamelCase : Optional[int] = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
__UpperCamelCase : Optional[int] = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
__UpperCamelCase : int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowerCamelCase__ ( self : int ) -> List[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCamelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCamelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
@slow
def lowerCamelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
__UpperCamelCase : List[str] = self.tokenizer_class.from_pretrained("""microsoft/prophetnet-large-uncased""" )
__UpperCamelCase : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=lowerCAmelCase )
__UpperCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=lowerCAmelCase )
__UpperCamelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
__UpperCamelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
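# --- Greedy longest-match-first WordPiece, sketched (not the library code) to make
# the wordpiece assertions above concrete: "unwanted" covers as un + ##want + ##ed,
# while a word that cannot be fully covered ("unwantedX") collapses to [UNK].
def wordpiece(word: str, vocab: set[str], unk: str = "[UNK]") -> list[str]:
    tokens: list[str] = []
    start = 0
    while start < len(word):
        end = len(word)
        cur = None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # continuation pieces carry the ## prefix
            if piece in vocab:
                cur = piece  # longest match wins: scan from the full remainder down
                break
            end -= 1
        if cur is None:
            return [unk]  # no prefix of the remainder is in the vocab: whole word is unknown
        tokens.append(cur)
        start = end
    return tokens


assert wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert wordpiece("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]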
| 279 | 0 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a__ : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( _UpperCamelCase ,unittest.TestCase ):
UpperCamelCase : Optional[int] = XLNetTokenizer
UpperCamelCase : Optional[int] = XLNetTokenizerFast
UpperCamelCase : Tuple = True
UpperCamelCase : Dict = True
def _lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase = XLNetTokenizer(__magic_name__ , keep_accents=__magic_name__ )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = '<s>'
_lowerCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '<eod>' )
self.assertEqual(len(__magic_name__ ) , 1_0_0_6 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = XLNetTokenizer(__magic_name__ , keep_accents=__magic_name__ )
_lowerCAmelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(__magic_name__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] )
_lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] )
_lowerCAmelCase = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def _lowerCamelCase ( self ):
"""simple docstring"""
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=True )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + '',
'i',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['▁he', 'll', 'o'] )
def _lowerCamelCase ( self ):
"""simple docstring"""
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB , do_lower_case=False )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
'se',
'.',
] , )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
        tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased' )
        text = tokenizer.encode('sequence builders' , add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = {'input_ids': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name='xlnet-base-cased' , revision='c841166438c31ec7ca9a106dee7bb312b73ae511' , )
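# For reference, a minimal standalone sketch of the sequence layout the slow
# tests above assert (assuming the usual XLNet convention where id 4 is <sep>
# and id 3 is <cls>; real ids come from the pretrained vocabulary):
def build_xlnet_inputs(ids_a, ids_b=None):
    sep, cls = [4], [3]
    if ids_b is None:
        return ids_a + sep + cls
    return ids_a + sep + ids_b + sep + cls

if __name__ == "__main__":
    assert build_xlnet_inputs([1, 2]) == [1, 2, 4, 3]
    assert build_xlnet_inputs([1, 2], [5, 6]) == [1, 2, 4, 5, 6, 4, 3]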
| 702 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu( __lowerCamelCase ):
"""simple docstring"""
return np.maximum(0, __lowerCamelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
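    # A companion sketch (not part of the original file): the subgradient of
    # ReLU, handy for checking backprop by hand; np.where yields 1 for
    # strictly positive inputs and 0 elsewhere.
    def relu_derivative(vector):
        return np.where(np.asarray(vector) > 0, 1.0, 0.0)
    print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]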
| 309 | 0 |
import heapq
def greedy_min_vertex_cover( graph ):
    queue = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue , [-1 * len(value ), (key, value)] )
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue )[1][0]
        chosen_vertices.add(argmax )
        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v has no adjacent nodes, skip it
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax )
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue )
    return chosen_vertices
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 696 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
UpperCAmelCase_ = data_utils.TransfoXLTokenizer
UpperCAmelCase_ = data_utils.TransfoXLCorpus
UpperCAmelCase_ = data_utils
UpperCAmelCase_ = data_utils
def lowerCAmelCase_ ( lowercase: Tuple , lowercase: Union[str, Any] , lowercase: Any , lowercase: List[Any] ) -> List[Any]:
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(lowercase , '''rb''' ) as fp:
_UpperCamelCase: Optional[int] = pickle.load(lowercase , encoding='''latin1''' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_UpperCamelCase: Tuple = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file''']
print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" )
_UpperCamelCase: List[str] = corpus.vocab.__dict__
torch.save(lowercase , lowercase )
_UpperCamelCase: str = corpus.__dict__
corpus_dict_no_vocab.pop('''vocab''' , lowercase )
_UpperCamelCase: str = pytorch_dump_folder_path + '''/''' + CORPUS_NAME
print(F"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(lowercase , lowercase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_UpperCamelCase: Optional[Any] = os.path.abspath(lowercase )
_UpperCamelCase: List[str] = os.path.abspath(lowercase )
print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_UpperCamelCase: Tuple = TransfoXLConfig()
else:
_UpperCamelCase: Union[str, Any] = TransfoXLConfig.from_json_file(lowercase )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCamelCase: Optional[int] = TransfoXLLMHeadModel(lowercase )
_UpperCamelCase: int = load_tf_weights_in_transfo_xl(lowercase , lowercase , lowercase )
# Save pytorch-model
_UpperCamelCase: List[str] = os.path.join(lowercase , lowercase )
_UpperCamelCase: Union[str, Any] = os.path.join(lowercase , lowercase )
print(F"""Save PyTorch model to {os.path.abspath(lowercase )}""" )
torch.save(model.state_dict() , lowercase )
print(F"""Save configuration file to {os.path.abspath(lowercase )}""" )
with open(lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
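# Example invocation (a sketch -- the script name and paths below are
# placeholders for illustration):
#
#   python convert_transfo_xl_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/transfo-xl/model.ckpt \
#       --transfo_xl_config_file /tmp/transfo-xl/config.json \
#       --pytorch_dump_folder_path /tmp/transfo-xl-pytorch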
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
UpperCAmelCase_ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 271 | 0 |
def is_ip_va_address_valid( snake_case_ ):
    _A : Tuple = [int(i ) for i in snake_case_.split(""".""" ) if i.isdigit()]
    return len(_A ) == 4 and all(0 <= octet <= 255 for octet in _A )
if __name__ == "__main__":
_snake_case = input().strip()
_snake_case = "valid" if is_ip_va_address_valid(ip) else "invalid"
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 54 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( snake_case_,snake_case_a ):
    lista = list(snake_case_ )
    listb = list(snake_case_a )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = """_"""
    if count > 1:
        return False
    else:
        return "".join(lista )
def check( binary ):
    pi = []
    while True:
        checka = ["""$"""] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1,len(binary ) ):
                k = compare_string(binary[i],binary[j] )
                if k is not False:
                    checka[i] = """*"""
                    checka[j] = """*"""
                    temp.append(k )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary( no_of_variable,minterms ):
    temp = []
    for minterm in minterms:
        string = """"""
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table( stringa,stringb,count ):
    lista = list(stringa )
    listb = list(stringb )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection( chart,prime_implicants ):
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants,binary ):
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("""_""" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i],binary[j],count ):
                chart[i][j] = 1
    return chart
def main( ):
    no_of_variable = int(input("""Enter the no. of variables\n""" ) )
    minterms = [
        float(x )
        for x in input(
            """Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
    ]
    binary = decimal_to_binary(no_of_variable,minterms )
    prime_implicants = check(binary )
    print("""Prime Implicants are:""" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants,binary )
    essential_prime_implicants = selection(chart,prime_implicants )
    print("""Essential Prime Implicants are:""" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
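# A worked example (a sketch; the helpers behave sensibly for integer
# minterms, while main() reads floats as in the original): f(a, b, c) with
# minterms {1, 4, 5} has prime implicants _01 and 10_, both essential.
if __name__ == "__main__":
    _binary = decimal_to_binary(3, [1, 4, 5])  # ['001', '100', '101']
    _pi = check(list(_binary))  # ['_01', '10_'] in some order
    print(sorted(selection(prime_implicant_chart(_pi, _binary), _pi)))  # ['10_', '_01']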
| 54 | 1 |
def actual_power( a: int ,b: int ):
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a ,int(b / 2 ) ) * actual_power(a ,int(b / 2 ) )
    else:
        return a * actual_power(a ,int(b / 2 ) ) * actual_power(a ,int(b / 2 ) )
def power( a: int ,b: int ):
    if b < 0:
        # int(b / 2) truncates toward zero, so actual_power is well defined
        # for negative exponents too; invert the result to obtain a ** b.
        return 1 / actual_power(a ,b )
    return actual_power(a ,b )
if __name__ == "__main__":
print(power(-2, -3))
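    # Worked checks (a sketch): actual_power(-2, -3) walks the exponents
    # -3 -> -1 -> 0 and evaluates to (-2) ** 3 = -8, so power returns
    # 1 / -8 = -0.125, i.e. (-2) ** -3.
    assert power(-2, -3) == -0.125
    assert power(2, 10) == 1024
    assert power(5, 0) == 1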
| 233 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_0_0 # TEMPERATURE (unit = K)
def builtin_voltage( donor_conc: float ,acceptor_conc: float ,intrinsic_conc: float ,):
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
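    # Example usage (a sketch; silicon-like numbers in cm^-3, chosen purely
    # for illustration): the built-in potential grows logarithmically with
    # the doping product N_d * N_a -- roughly 0.81 V here at T = 300 K.
    print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))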
| 233 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : str = DiTPipeline
__SCREAMING_SNAKE_CASE : List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
__SCREAMING_SNAKE_CASE : Any = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
__SCREAMING_SNAKE_CASE : Dict = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__SCREAMING_SNAKE_CASE : Tuple = False
def UpperCAmelCase__ ( self : int ):
torch.manual_seed(0 )
_UpperCAmelCase = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowercase__ , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=lowercase__ , )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any]=0 ):
if str(lowercase__ ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(lowercase__ )
else:
_UpperCAmelCase = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ )
_UpperCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = """cpu"""
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**lowercase__ )
pipe.to(lowercase__ )
pipe.set_progress_bar_config(disable=lowercase__ )
_UpperCAmelCase = self.get_dummy_inputs(lowercase__ )
_UpperCAmelCase = pipe(**lowercase__ ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowercase__ , 1e-3 )
def UpperCAmelCase__ ( self : Tuple ):
self._test_inference_batch_single_identical(relax_max_difference=lowercase__ , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCAmelCase__ ( self : str ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCAmelCase__ ( self : Tuple ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
_UpperCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_UpperCAmelCase = pipe.get_label_ids(lowercase__ )
_UpperCAmelCase = pipe(lowercase__ , generator=lowercase__ , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(lowercase__ , lowercase__ ):
_UpperCAmelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def UpperCAmelCase__ ( self : List[Any] ):
_UpperCAmelCase = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
_UpperCAmelCase = ["""vase""", """umbrella"""]
_UpperCAmelCase = pipe.get_label_ids(lowercase__ )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(lowercase__ , generator=lowercase__ , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(lowercase__ , lowercase__ ):
_UpperCAmelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
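# For context, a minimal sketch of what pipe.get_label_ids does conceptually
# in the tests above (illustrative mapping only -- the real pipeline looks
# the words up in the ImageNet-1k label list attached to the model config):
_TOY_IMAGENET_LABELS = {"vase": 883, "umbrella": 879, "white shark": 2, "white wolf": 270}

def toy_get_label_ids(words):
    return [_TOY_IMAGENET_LABELS[w] for w in words]

if __name__ == "__main__":
    assert toy_get_label_ids(["vase", "umbrella"]) == [883, 879]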
| 704 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , __UpperCamelCase : Any , __UpperCamelCase : List[str]=13 , __UpperCamelCase : Dict=32 , __UpperCamelCase : str=2 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : str=16 , __UpperCamelCase : Any=[1, 2, 1] , __UpperCamelCase : Dict=[2, 2, 4] , __UpperCamelCase : List[str]=2 , __UpperCamelCase : int=2.0 , __UpperCamelCase : str=True , __UpperCamelCase : str=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : int=0.1 , __UpperCamelCase : str="gelu" , __UpperCamelCase : str=False , __UpperCamelCase : List[str]=True , __UpperCamelCase : str=0.02 , __UpperCamelCase : int=1e-5 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Tuple=None , __UpperCamelCase : List[str]=True , __UpperCamelCase : Dict=10 , __UpperCamelCase : Any=8 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : List[str] ):
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = SwinvaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : List[str] ):
_UpperCAmelCase = SwinvaForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = SwinvaForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : int ):
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = SwinvaForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : int = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = SwinvaModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , embed_dim=37 )
def UpperCAmelCase__ ( self : Dict ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def UpperCAmelCase__ ( self : str ):
pass
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def UpperCAmelCase__ ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
_UpperCAmelCase = outputs.attentions
_UpperCAmelCase = len(self.model_tester.depths )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = config.window_size**2
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_UpperCAmelCase = len(__UpperCamelCase )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
_UpperCAmelCase = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_UpperCAmelCase = 2
self.assertEqual(out_len + added_hidden_states , len(__UpperCamelCase ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCAmelCase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ):
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# Swinv2 has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_UpperCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = reshaped_hidden_states[0].shape
_UpperCAmelCase = (
reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def UpperCAmelCase__ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = SwinvaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
_UpperCAmelCase = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
__UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
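# The shape arithmetic the model tests above rely on, as a standalone sketch:
# patch embedding turns an (image_size, image_size) input into
# (image_size // patch_size) ** 2 tokens, and each of the len(depths) - 1
# patch-merging stages divides the token count by 4 while doubling the
# channel dimension.
def swin_final_shape(image_size, patch_size, embed_dim, depths):
    seq_len = (image_size // patch_size) ** 2 // (4 ** (len(depths) - 1))
    hidden = embed_dim * 2 ** (len(depths) - 1)
    return seq_len, hidden

if __name__ == "__main__":
    # the tester defaults above: image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]
    assert swin_final_shape(32, 2, 16, [1, 2, 1]) == (16, 64)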
| 129 | 0 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase: List[Any] =logging.get_logger("transformers.models.speecht5")
def load_weights( checkpoint ,hf_model ,config ):
hf_model.apply_weight_norm()
lowercase : Tuple = checkpoint["""input_conv.weight_g"""]
lowercase : Union[str, Any] = checkpoint["""input_conv.weight_v"""]
lowercase : Optional[int] = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
lowercase : Any = checkpoint[F'''upsamples.{i}.1.weight_g''']
lowercase : str = checkpoint[F'''upsamples.{i}.1.weight_v''']
lowercase : Optional[int] = checkpoint[F'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowercase : Dict = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_g''']
lowercase : Optional[int] = checkpoint[F'''blocks.{i}.convs1.{j}.1.weight_v''']
lowercase : int = checkpoint[F'''blocks.{i}.convs1.{j}.1.bias''']
lowercase : Optional[Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_g''']
lowercase : Union[str, Any] = checkpoint[F'''blocks.{i}.convs2.{j}.1.weight_v''']
lowercase : Optional[int] = checkpoint[F'''blocks.{i}.convs2.{j}.1.bias''']
lowercase : str = checkpoint["""output_conv.1.weight_g"""]
lowercase : List[Any] = checkpoint["""output_conv.1.weight_v"""]
lowercase : str = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
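# Why load_weights wraps the copy in apply_weight_norm()/remove_weight_norm()
# (a sketch): the original checkpoint stores every convolution as the
# weight-norm pair (weight_g, weight_v), so those parameter names must exist
# while copying, and are then folded back into a plain weight. A minimal
# illustration on a bare conv layer (assumes the classic
# torch.nn.utils.weight_norm API with .weight_g/.weight_v attributes):
if __name__ == "__main__":
    from torch import nn
    from torch.nn.utils import remove_weight_norm, weight_norm
    _conv = weight_norm(nn.Conv1d(2, 2, 3))  # adds weight_g / weight_v
    _g, _v = _conv.weight_g.detach(), _conv.weight_v.detach()
    remove_weight_norm(_conv)  # folds them back into a plain .weight
    assert torch.allclose(_conv.weight, _g * _v / _v.norm(dim=(1, 2), keepdim=True))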
@torch.no_grad()
def convert_hifigan_checkpoint( checkpoint_path ,stats_path ,pytorch_dump_folder_path ,config_path=None ,repo_id=None ,):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()
    model = SpeechTaHifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] ,model ,config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
lowerCAmelCase: List[str] =argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowerCAmelCase: Dict =parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 607 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __snake_case ( __A ) -> Any:
lowercase : List[str] = os.path.join(args.tf_model_dir ,"""parameters.json""" )
lowercase : Any = json.loads(open(__A ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(""".pt""" ):
lowercase : List[str] = args.output + """.pt"""
lowercase : Dict = OrderedDict()
with tf.device("""/CPU:0""" ):
lowercase : Any = tf.train.load_checkpoint(args.tf_model_dir )
lowercase : List[Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase : Optional[Any] = reader.get_tensor(__A ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
lowercase : str = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
lowercase : List[Any] = 8
lowercase : Optional[int] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int = torch.tensor(__A )
elif key_name.startswith("""model/moe""" ):
lowercase : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
lowercase : Any = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
lowercase : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Tuple = torch.tensor(__A )
elif key_name.endswith("""/softmlp/kernel""" ):
lowercase : str = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
lowercase : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] = torch.tensor(__A )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
lowercase : Union[str, Any] = key_name[-9:-7]
for i in range(16 ):
lowercase : int = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
lowercase : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase : List[Any] = torch.tensor(__A )
elif key_name.startswith("""model/mlp""" ):
lowercase : Any = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
lowercase : Dict = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
lowercase : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int = torch.tensor(__A )
elif key_name.endswith("""/p1/bias""" ):
lowercase : Tuple = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
lowercase : Tuple = vnp.copy() # same because it is one dimensional
lowercase : Tuple = torch.tensor(__A )
elif key_name.endswith("""/p2/kernel""" ):
lowercase : int = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
lowercase : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] = torch.tensor(__A )
elif key_name.endswith("""/p2/bias""" ):
lowercase : Any = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
lowercase : str = vnp.copy() # same because it is one dimensional
lowercase : str = torch.tensor(__A )
elif key_name.startswith("""model/ln""" ):
lowercase : Union[str, Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase : Any = """model.blocks.%d.feed_forward.norm.bias""" % player
lowercase : List[str] = vnp.copy() # same because it is one dimensional
lowercase : Any = torch.tensor(__A )
elif key_name.endswith("""/g""" ):
lowercase : int = """model.blocks.%d.feed_forward.norm.weight""" % player
lowercase : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] = torch.tensor(__A )
elif key_name.startswith("""model/att""" ):
lowercase : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
lowercase : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Any = state[:, 0, :, :]
lowercase : List[Any] = state[:, 1, :, :]
lowercase : Optional[Any] = state[:, 2, :, :]
lowercase : Dict = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : str = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
lowercase : Optional[int] = torch.tensor(__A )
lowercase : int = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
lowercase : Optional[Any] = torch.tensor(__A )
lowercase : Tuple = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
lowercase : List[str] = torch.tensor(__A )
elif key_name.endswith("""/o/kernel""" ):
lowercase : Any = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
lowercase : List[Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] = torch.tensor(__A )
elif key_name.startswith("""model/an""" ):
lowercase : Tuple = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase : List[str] = """model.blocks.%d.self_attn.norm.bias""" % player
lowercase : List[str] = vnp.copy() # same because it is one dimensional
lowercase : int = torch.tensor(__A )
elif key_name.endswith("""/g""" ):
lowercase : Any = """model.blocks.%d.self_attn.norm.weight""" % player
lowercase : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowercase : int = torch.tensor(__A )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
lowercase : Any = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
lowercase : Optional[int] = """model.%s.weight""" % nlayer
lowercase : Any = vnp.copy() # same in embedded
lowercase : Dict = torch.tensor(__A )
if key_name.startswith("""model/wte""" ):
lowercase : Optional[Any] = """lm_head.weight"""
lowercase : int = vnp.copy() # same in embedded
lowercase : str = torch.tensor(__A )
elif key_name.startswith("""model/wob""" ):
lowercase : str = """final_logits_bias"""
lowercase : List[str] = vnp.copy() # same in embedded
lowercase : str = state.reshape((1, -1) )
lowercase : Tuple = torch.tensor(__A )
elif key_name == "model/dense/kernel":
lowercase : Dict = """model.last_project.weight"""
lowercase : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int = torch.tensor(__A )
elif key_name == "model/dense_1/bias":
lowercase : List[str] = """model.last_project.bias"""
lowercase : Dict = vnp.copy() # same because it is one dimensional
lowercase : List[str] = torch.tensor(__A )
torch.save(__A ,args.output )
if __name__ == "__main__":
lowerCAmelCase: Tuple =argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
lowerCAmelCase: Tuple =parser.parse_args()
convert_tf_gptsan_to_pt(args)
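# Example invocation (a sketch -- the script name and paths below are
# placeholders for illustration):
#
#   python convert_tf_gptsan_to_pt.py \
#       --tf_model_dir /path/to/gptsan-tf-checkpoint \
#       --output /path/to/gptsan-japanese.pt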
| 607 | 1 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 715 |
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = """src/transformers"""
# Matches is_xxx_available()
_re_backend = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_re_try = re.compile(r"""^\s*try:""")
# Catches a line with else:
_re_else = re.compile(r"""^\s*else:""")
def find_backend( line ):
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def parse_init( A__ ):
with open(A__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
a__ : Optional[Any] = f.readlines()
a__ : Optional[Any] = 0
while line_index < len(A__ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(A__ ):
return None
# First grab the objects without a specific backend in _import_structure
a__ : int = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
a__ : Optional[int] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(A__ ):
a__ : Optional[int] = _re_one_line_import_struct.search(A__ ).groups()[0]
a__ : List[str] = re.findall(R'\[([^\]]+)\]' , A__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
a__ : int = _re_import_struct_key_value.search(A__ )
if single_line_import_search is not None:
a__ : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(A__ ) > 0]
objects.extend(A__ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
a__ : Any = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
a__ : Dict = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
a__ : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
a__ : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
a__ : Any = lines[line_index]
if _re_import_struct_add_one.search(A__ ) is not None:
objects.append(_re_import_struct_add_one.search(A__ ).groups()[0] )
elif _re_import_struct_add_many.search(A__ ) is not None:
a__ : int = _re_import_struct_add_many.search(A__ ).groups()[0].split(', ' )
a__ : List[Any] = [obj[1:-1] for obj in imports if len(A__ ) > 0]
objects.extend(A__ )
elif _re_between_brackets.search(A__ ) is not None:
a__ : List[str] = _re_between_brackets.search(A__ ).groups()[0].split(', ' )
a__ : int = [obj[1:-1] for obj in imports if len(A__ ) > 0]
objects.extend(A__ )
elif _re_quote_object.search(A__ ) is not None:
objects.append(_re_quote_object.search(A__ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
a__ : Optional[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
a__ : Union[str, Any] = []
while (
line_index < len(A__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
a__ : List[Any] = lines[line_index]
a__ : List[str] = _re_import.search(A__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
a__ : Dict = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(A__ ):
# If the line is an if is_backend_available, we grab all objects associated.
a__ : Any = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
a__ : str = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
a__ : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
a__ : List[str] = lines[line_index]
a__ : Union[str, Any] = _re_import.search(A__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
a__ : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def A_ ( A__ , A__ ) -> Dict:
def find_duplicates(A__ ):
return [k for k, v in collections.Counter(A__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
a__ : Union[str, Any] = []
for key in import_dict_objects.keys():
a__ : Union[str, Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
a__ : Optional[int] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
a__ : str = 'base imports' if key == 'none' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F' {a} in _import_structure but not in TYPE_HINT.' )
return errors
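# A minimal, runnable sketch of the duplicate check used above, assuming plain
# lists of object names (the real helper runs over the parsed init structures):
import collections

def _find_duplicates_sketch(items):
    # Keep only the names that occur more than once.
    return [k for k, v in collections.Counter(items).items() if v > 1]

assert _find_duplicates_sketch(["Model", "Config", "Model"]) == ["Model"]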
def A_ ( ) -> List[Any]:
a__ : Tuple = []
for root, _, files in os.walk(A__ ):
if "__init__.py" in files:
a__ : Tuple = os.path.join(A__ , '__init__.py' )
a__ : Optional[int] = parse_init(A__ )
if objects is not None:
a__ : List[Any] = analyze_results(*A__ )
if len(A__ ) > 0:
a__ : List[Any] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('\n'.join(A__ ) )
if len(A__ ) > 0:
raise ValueError('\n\n'.join(A__ ) )
def A_ ( ) -> List[Any]:
a__ : List[str] = []
for path, directories, files in os.walk(A__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(A__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(A__ ) / folder).glob('*.py' ) ) ) == 0:
continue
a__ : List[str] = str((Path(A__ ) / folder).relative_to(A__ ) )
a__ : List[Any] = short_path.replace(os.path.sep , '.' )
submodules.append(A__ )
for fname in files:
if fname == "__init__.py":
continue
a__ : str = str((Path(A__ ) / fname).relative_to(A__ ) )
a__ : Any = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(A__ )
return submodules
lowercase : Union[str, Any] = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def A_ ( ) -> Dict:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
a__ : List[Any] = direct_transformers_import(A__ )
a__ : Optional[int] = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
# (potentially re-) add them.
with open(os.path.join(A__ , '__init__.py' ) , 'r' ) as f:
a__ : Optional[Any] = f.read()
import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , A__ ) ) )
a__ : List[str] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(A__ ) > 0:
a__ : Optional[int] = '\n'.join(F'- {module}' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F'{list_of_modules}\n'
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 392 | 0 |
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
if not (isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase )):
raise ValueError('''longest_common_substring() takes two strings for inputs''' )
UpperCAmelCase_ : Optional[Any] = len(_lowercase )
UpperCAmelCase_ : Optional[Any] = len(_lowercase )
UpperCAmelCase_ : Dict = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )]
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Tuple = 0
for i in range(1 , texta_length + 1 ):
for j in range(1 , texta_length + 1 ):
if texta[i - 1] == texta[j - 1]:
UpperCAmelCase_ : Dict = 1 + dp[i - 1][j - 1]
if dp[i][j] > ans_length:
UpperCAmelCase_ : Any = i
UpperCAmelCase_ : Optional[Any] = dp[i][j]
return texta[ans_index - ans_length : ans_index]
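# A standalone, hedged sketch of the same O(n*m) suffix table (hypothetical
# names; dp[i][j] holds the length of the common suffix of a[:i] and b[:j]):
def _lcs_substring_sketch(a: str, b: str) -> str:
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    best_len, best_end = 0, 0
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                if dp[i][j] > best_len:
                    best_len, best_end = dp[i][j], i
    return a[best_end - best_len : best_end]

assert _lcs_substring_sketch("abcdef", "zcdemf") == "cde"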
if __name__ == "__main__":
import doctest
doctest.testmod() | 30 |
'''simple docstring'''
import operator
def A__ ( __lowerCAmelCase : list , __lowerCAmelCase : bool = False , __lowerCAmelCase : list | None = None ):
lowerCamelCase__ = operator.lt if reverse else operator.gt
lowerCamelCase__ = solution or []
if not arr:
return solution
lowerCamelCase__ = [arr.pop(0 )]
for i, item in enumerate(__lowerCAmelCase ):
if _operator(__lowerCAmelCase , sublist[-1] ):
sublist.append(__lowerCAmelCase )
arr.pop(__lowerCAmelCase )
# merging sublist into solution list
if not solution:
solution.extend(__lowerCAmelCase )
else:
while sublist:
lowerCamelCase__ = sublist.pop(0 )
for i, xx in enumerate(__lowerCAmelCase ):
if not _operator(__lowerCAmelCase , __lowerCAmelCase ):
solution.insert(__lowerCAmelCase , __lowerCAmelCase )
break
else:
solution.append(__lowerCAmelCase )
strand_sort(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
return solution
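# A hedged trace of strand sort on [4, 3, 5, 1, 2] (ascending):
#   pass 1 pulls the increasing strand [4, 5], leaving [3, 1, 2];
#   pass 2 pulls [3] and merges it in front, giving [3, 4, 5];
#   pass 3 pulls [1, 2] and merges, giving the final [1, 2, 3, 4, 5].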
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 50 | 0 |
'''simple docstring'''
from __future__ import annotations
def __A ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] ): # noqa: E741
"""simple docstring"""
while r - l > 1:
__SCREAMING_SNAKE_CASE : str = (l + r) // 2
if v[m] >= key:
__SCREAMING_SNAKE_CASE : Optional[int] = m
else:
__SCREAMING_SNAKE_CASE : Optional[int] = m # noqa: E741
return r
def __A ( _SCREAMING_SNAKE_CASE : list[int] ):
"""simple docstring"""
if len(_SCREAMING_SNAKE_CASE ) == 0:
return 0
__SCREAMING_SNAKE_CASE : Any = [0] * len(_SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Dict = 1
__SCREAMING_SNAKE_CASE : Any = v[0]
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
if v[i] < tail[0]:
__SCREAMING_SNAKE_CASE : Any = v[i]
elif v[i] > tail[length - 1]:
__SCREAMING_SNAKE_CASE : Tuple = v[i]
length += 1
else:
__SCREAMING_SNAKE_CASE : Tuple = v[i]
return length
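# A hedged trace of the tails array above on [2, 5, 3, 7, 11, 8, 10, 13, 6]:
#   [2] -> [2, 5] -> [2, 3] -> [2, 3, 7] -> [2, 3, 7, 11] -> [2, 3, 7, 8]
#   -> [2, 3, 7, 8, 10] -> [2, 3, 7, 8, 10, 13] -> [2, 3, 6, 8, 10, 13]
# so the longest strictly increasing subsequence has length 6
# (e.g. 2, 3, 7, 8, 10, 13).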
if __name__ == "__main__":
import doctest
doctest.testmod()
| 564 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def __A ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
if not sentence:
return ""
__SCREAMING_SNAKE_CASE : str = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
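# A hedged usage note: only the first character is mapped through the
# lower-to-upper table, so "hello world" -> "Hello world", and a sentence
# starting with a non-letter (e.g. "123abc") is returned unchanged.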
if __name__ == "__main__":
from doctest import testmod
testmod()
| 564 | 1 |
'''simple docstring'''
def __lowerCamelCase ( __snake_case : list, __snake_case : list, __snake_case : int ) -> Any:
"""simple docstring"""
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError("""The length of profit and weight must be same.""" )
if max_weight <= 0:
raise ValueError("""max_weight must greater than zero.""" )
if any(p < 0 for p in profit ):
raise ValueError("""Profit can not be negative.""" )
if any(w < 0 for w in weight ):
raise ValueError("""Weight can not be negative.""" )
# List created to store profit gained for the 1kg in case of each weight
# respectively. Calculate and append profit/weight for each element.
A__ : Any =[p / w for p, w in zip(_lowerCamelCase, _lowerCamelCase )]
# Creating a copy of the list and sorting profit/weight in ascending order
A__ : Optional[int] =sorted(_lowerCamelCase )
# declaring useful variables
A__ : List[Any] =len(_lowerCamelCase )
A__ : List[Any] =0
A__ : List[Any] =0
A__ : Dict =0
# loop while the accumulated weight stays within the max limit (e.g. 15 kg) and i < length
while limit <= max_weight and i < length:
# flag value for encountered greatest element in sorted_profit_by_weight
A__ : Optional[Any] =sorted_profit_by_weight[length - i - 1]
A__ : int =profit_by_weight.index(_lowerCamelCase )
A__ : int =-1
# check if the weight encountered is less than the total weight
# encountered before.
if max_weight - limit >= weight[index]:
limit += weight[index]
# Take the whole item: weight[index] / weight[index] == 1,
# so its full profit is added.
gain += 1 * profit[index]
else:
# Since the weight encountered is greater than limit, therefore take the
# required number of remaining kgs and calculate profit for it.
# weight remaining / weight[index]
gain += (max_weight - limit) / weight[index] * profit[index]
break
i += 1
return gain
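# A hedged worked example of the greedy above: profit = [10, 9, 8],
# weight = [5, 3, 2], max_weight = 7. The profit/weight ratios are 2, 3, 4,
# so items are taken in ratio order: item 3 fully (+8), item 2 fully (+9),
# then 2 of the 5 kg of item 1 (+ 2/5 * 10 = 4), for a total gain of 21.0.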
if __name__ == "__main__":
print(
'Input profits, weights, and then max_weight (all positive ints) separated by '
'spaces.'
)
__snake_case : Optional[Any] = [int(x) for x in input('Input profits separated by spaces: ').split()]
__snake_case : Any = [int(x) for x in input('Input weights separated by spaces: ').split()]
__snake_case : str = int(input('Max weight allowed: '))
# Function Call
calc_profit(profit, weight, max_weight)
| 215 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = "▁"
lowercase__ = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
lowercase__ = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
lowercase__ = {
"facebook/m2m100_418M": 1024,
}
# fmt: off
lowercase__ = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = ["input_ids", "attention_mask"]
_lowerCAmelCase = []
_lowerCAmelCase = []
def __init__(self , _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<pad>" , _lowercase="<unk>" , _lowercase="m2m100" , _lowercase = None , _lowercase=8 , **_lowercase , ):
'''simple docstring'''
__a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
__a : List[str] = language_codes
__a : str = FAIRSEQ_LANGUAGE_CODES[language_codes]
__a : Optional[int] = {lang_code: F'''__{lang_code}__''' for lang_code in fairseq_language_code}
__a : Optional[int] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(_lowercase )
for lang_code in fairseq_language_code
if self.get_lang_token(_lowercase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=_lowercase , tgt_lang=_lowercase , bos_token=_lowercase , eos_token=_lowercase , sep_token=_lowercase , unk_token=_lowercase , pad_token=_lowercase , language_codes=_lowercase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=_lowercase , **_lowercase , )
__a : Optional[Any] = vocab_file
__a : List[Any] = load_json(_lowercase )
__a : List[str] = {v: k for k, v in self.encoder.items()}
__a : List[Any] = spm_file
__a : int = load_spm(_lowercase , self.sp_model_kwargs )
__a : Dict = len(self.encoder )
__a : Optional[int] = {
self.get_lang_token(_lowercase ): self.encoder_size + i for i, lang_code in enumerate(_lowercase )
}
__a : Dict = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_lowercase )}
__a : Tuple = {v: k for k, v in self.lang_token_to_id.items()}
__a : List[str] = src_lang if src_lang is not None else """en"""
__a : List[Any] = tgt_lang
__a : List[Any] = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__a : Optional[Any] = num_madeup_words
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
return self.sp_model.encode(_lowercase , out_type=_lowercase )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(_lowercase , self.encoder[self.unk_token] )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(_lowercase , self.unk_token )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Any = []
__a : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowercase ) + token
__a : Optional[Any] = []
else:
current_sub_tokens.append(_lowercase )
out_string += self.sp_model.decode(_lowercase )
return out_string.strip()
def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
__a : str = [1] * len(self.prefix_tokens )
__a : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowercase )) + suffix_ones
return prefix_ones + ([0] * len(_lowercase )) + ([0] * len(_lowercase )) + suffix_ones
def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = {self.convert_ids_to_tokens(_lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ):
'''simple docstring'''
__a : Optional[Any] = self.__dict__.copy()
__a : List[str] = None
return state
def __setstate__(self , _lowercase ):
'''simple docstring'''
__a : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__a : List[str] = {}
__a : Any = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
'''simple docstring'''
__a : Tuple = Path(_lowercase )
if not save_dir.is_dir():
raise OSError(F'''{save_directory} should be a directory''' )
__a : Union[str, Any] = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
__a : List[Any] = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , _lowercase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowercase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowercase )
elif not os.path.isfile(self.spm_file ):
with open(_lowercase , """wb""" ) as fi:
__a : List[str] = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (str(_lowercase ), str(_lowercase ))
def lowerCAmelCase__(self , _lowercase , _lowercase = "en" , _lowercase = None , _lowercase = "ro" , **_lowercase , ):
'''simple docstring'''
__a : Dict = src_lang
__a : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(_lowercase , _lowercase , **_lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , **_lowercase ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__a : Dict = src_lang
__a : List[str] = self(_lowercase , add_special_tokens=_lowercase , **_lowercase )
__a : Union[str, Any] = self.get_lang_id(_lowercase )
__a : Tuple = tgt_lang_id
return inputs
def lowerCAmelCase__(self ):
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase__(self ):
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : str = self.get_lang_token(_lowercase )
__a : Union[str, Any] = self.lang_token_to_id[lang_token]
__a : Optional[Any] = [self.cur_lang_id]
__a : List[str] = [self.eos_token_id]
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Dict = self.get_lang_token(_lowercase )
__a : Union[str, Any] = self.lang_token_to_id[lang_token]
__a : Tuple = [self.cur_lang_id]
__a : Dict = [self.eos_token_id]
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
return self.lang_code_to_token[lang]
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Union[str, Any] = self.get_lang_token(_lowercase )
return self.lang_token_to_id[lang_token]
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ):
__a : Optional[int] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def __magic_name__ ( _lowerCamelCase : str ):
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str ):
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
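# A hedged usage sketch of the tokenizer above (published in transformers as
# `M2M100Tokenizer`; the en -> fr pair and input text are illustrative):
# tokenizer = M2M100Tokenizer.from_pretrained(
#     "facebook/m2m100_418M", src_lang="en", tgt_lang="fr"
# )
# model_inputs = tokenizer("Hello world", return_tensors="pt")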
| 581 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _UpperCAmelCase :
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=6_4 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=3_7 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=1_0 , lowercase_=0.0_2 , lowercase_=[1, 1_6, 4, 4] , lowercase_=None , ) -> Union[str, Any]:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = scope
UpperCAmelCase = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
UpperCAmelCase = (self.image_size // 3_2) ** 2
UpperCAmelCase = num_patches + 1
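# Worked example (hedged): with image_size = 64 and output stride 32 the
# backbone feature map is 2 x 2, so num_patches = (64 // 32) ** 2 = 4 and
# the sequence length is 4 + 1 = 5 including the [CLS] token.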
def a_ ( self ) -> List[str]:
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def a_ ( self ) -> Any:
UpperCAmelCase = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowercase_ , )
def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
UpperCAmelCase = ViTHybridModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
UpperCAmelCase = self.type_sequence_label_size
UpperCAmelCase = ViTHybridForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : List[Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Dict = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Any = False
def a_ ( self ) -> Tuple:
UpperCAmelCase = ViTHybridModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=3_7 )
def a_ ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def a_ ( self ) -> List[str]:
pass
def a_ ( self ) -> Tuple:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def a_ ( self ) -> Tuple:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def a_ ( self ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def a_ ( self ) -> int:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = _config_zero_init(lowercase_ )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(config=lowercase_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
UpperCAmelCase = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@slow
def a_ ( self ) -> Tuple:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = ViTHybridModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowercase__ ( ) -> int:
"""simple docstring"""
UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def a_ ( self ) -> Optional[int]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def a_ ( self ) -> Tuple:
UpperCAmelCase = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowercase_ )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowercase_ , return_tensors='pt' ).to(lowercase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase = model(**lowercase_ )
# verify the logits
UpperCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@slow
@require_accelerate
def a_ ( self ) -> List[Any]:
UpperCAmelCase = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
UpperCAmelCase = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowercase_ , return_tensors='pt' )
UpperCAmelCase = model(**lowercase_ )
UpperCAmelCase = outputs.logits
# model predicts one of the 1000 ImageNet classes
UpperCAmelCase = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 719 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase ( unittest.TestCase ):
def __init__( self , lowercase_ , lowercase_=3 , lowercase_=3_2 , lowercase_=3 , lowercase_=1_0 , lowercase_=[1_0, 2_0, 3_0, 4_0] , lowercase_=[1, 1, 2, 1] , lowercase_=True , lowercase_=True , lowercase_="relu" , lowercase_=3 , lowercase_=None , ) -> str:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = num_channels
UpperCAmelCase = embeddings_size
UpperCAmelCase = hidden_sizes
UpperCAmelCase = depths
UpperCAmelCase = is_training
UpperCAmelCase = use_labels
UpperCAmelCase = hidden_act
UpperCAmelCase = num_labels
UpperCAmelCase = scope
UpperCAmelCase = len(lowercase_ )
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = self.get_config()
return config, pixel_values
def a_ ( self ) -> Optional[Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a_ ( self , lowercase_ , lowercase_ ) -> List[Any]:
UpperCAmelCase = FlaxRegNetModel(config=lowercase_ )
UpperCAmelCase = model(lowercase_ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def a_ ( self , lowercase_ , lowercase_ ) -> Union[str, Any]:
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaxRegNetForImageClassification(config=lowercase_ )
UpperCAmelCase = model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a_ ( self ) -> List[Any]:
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Tuple = False
def a_ ( self ) -> None:
UpperCAmelCase = FlaxRegNetModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def a_ ( self ) -> List[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a_ ( self ) -> Optional[Any]:
return
def a_ ( self ) -> Any:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def a_ ( self ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def a_ ( self ) -> List[str]:
pass
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def a_ ( self ) -> Optional[int]:
pass
def a_ ( self ) -> Dict:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase_ )
def a_ ( self ) -> int:
def check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = model_class(lowercase_ )
UpperCAmelCase = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def a_ ( self ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase = self._prepare_for_class(lowercase_ , lowercase_ )
UpperCAmelCase = model_class(lowercase_ )
@jax.jit
def model_jitted(lowercase_ , **lowercase_ ):
return model(pixel_values=lowercase_ , **lowercase_ )
with self.subTest('JIT Enabled' ):
UpperCAmelCase = model_jitted(**lowercase_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase = model_jitted(**lowercase_ ).to_tuple()
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
for jitted_output, output in zip(lowercase_ , lowercase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def a_ ( self ) -> List[str]:
return AutoImageProcessor.from_pretrained('facebook/regnet-y-040' ) if is_vision_available() else None
@slow
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = FlaxRegNetForImageClassification.from_pretrained('facebook/regnet-y-040' )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=lowercase_ , return_tensors='np' )
UpperCAmelCase = model(**lowercase_ )
# verify the logits
UpperCAmelCase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
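# A minimal, hedged sketch of the JIT-consistency idea exercised above: a
# jitted function should match its eager version in output shape (and value).
import jax
import jax.numpy as jnp

@jax.jit
def _scaled_sum_sketch(x):
    # Trivial stand-in for a model forward pass.
    return (2.0 * x).sum()

_x = jnp.arange(4.0)
_jitted = _scaled_sum_sketch(_x)
with jax.disable_jit():
    _eager = _scaled_sum_sketch(_x)
assert _jitted.shape == _eager.shape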
| 183 | 0 |
def lowercase ( __A : str , __A : str ) -> bool:
'''simple docstring'''
snake_case : str = len(__A )
snake_case : Union[str, Any] = len(__A )
snake_case : Union[str, Any] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
snake_case : List[Any] = True
for i in range(__A ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
snake_case : Any = True
if a[i].islower():
snake_case : Dict = True
return dp[n][m]
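# A hedged worked example (this is the classic "abbreviation" problem):
# a = "daBcd", b = "ABC" -> True, since the lowercase 'a' and 'c' can be
# capitalized and the extra 'd's deleted; a = "dBcd", b = "ABC" -> False,
# since no 'a'/'A' is available to produce the required 'A'.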
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__lowerCamelCase : int = logging.get_logger(__name__)
@add_end_docstrings(
_A , R'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class _lowercase ( _A ):
def lowercase__ ( self , a ):
if self.framework == "tf":
snake_case__ : int =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
snake_case__ : Optional[Any] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=a )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def lowercase__ ( self , a ):
snake_case__ : str =self.get_masked_index(a )
snake_case__ : Any =np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , F"No mask_token ({self.tokenizer.mask_token}) found on the input" , )
def lowercase__ ( self , a ):
if isinstance(a , a ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(a )
def lowercase__ ( self , a , a=None , **a ):
if return_tensors is None:
snake_case__ : Optional[Any] =self.framework
snake_case__ : List[str] =self.tokenizer(a , return_tensors=a )
self.ensure_exactly_one_mask_token(a )
return model_inputs
def lowercase__ ( self , a ):
snake_case__ : Optional[Any] =self.model(**a )
snake_case__ : str =model_inputs["""input_ids"""]
return model_outputs
def lowercase__ ( self , a , a=5 , a=None ):
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
snake_case__ : Union[str, Any] =target_ids.shape[0]
snake_case__ : Union[str, Any] =model_outputs["""input_ids"""][0]
snake_case__ : List[Any] =model_outputs["""logits"""]
if self.framework == "tf":
snake_case__ : str =tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
snake_case__ : Any =outputs.numpy()
snake_case__ : Optional[Any] =outputs[0, masked_index, :]
snake_case__ : List[Any] =stable_softmax(a , axis=-1 )
if target_ids is not None:
snake_case__ : str =tf.gather_nd(tf.squeeze(a , 0 ) , target_ids.reshape(-1 , 1 ) )
snake_case__ : List[str] =tf.expand_dims(a , 0 )
snake_case__ : Optional[Any] =tf.math.top_k(a , k=a )
snake_case__ , snake_case__ : int =topk.values.numpy(), topk.indices.numpy()
else:
snake_case__ : List[Any] =torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=a ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
snake_case__ : int =outputs[0, masked_index, :]
snake_case__ : Optional[int] =logits.softmax(dim=-1 )
if target_ids is not None:
snake_case__ : Dict =probs[..., target_ids]
snake_case__ , snake_case__ : List[Any] =probs.topk(a )
snake_case__ : List[Any] =[]
snake_case__ : int =values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
snake_case__ : Dict =[]
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
snake_case__ : List[Any] =input_ids.numpy().copy()
if target_ids is not None:
snake_case__ : Tuple =target_ids[p].tolist()
snake_case__ : Any =p
# Filter padding out:
snake_case__ : int =tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
snake_case__ : Union[str, Any] =self.tokenizer.decode(a , skip_special_tokens=a )
snake_case__ : int ={"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(a )
result.append(a )
if single_mask:
return result[0]
return result
def lowercase__ ( self , a , a=None ):
if isinstance(a , a ):
snake_case__ : Tuple =[targets]
try:
snake_case__ : Any =self.tokenizer.get_vocab()
except Exception:
snake_case__ : List[Any] ={}
snake_case__ : Any =[]
for target in targets:
snake_case__ : Optional[int] =vocab.get(a , a )
if id_ is None:
snake_case__ : str =self.tokenizer(
a , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , max_length=1 , truncation=a , )["""input_ids"""]
if len(a ) == 0:
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
snake_case__ : Any =input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"The specified target token `{target}` does not exist in the model vocabulary. "
F"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`." )
target_ids.append(id_ )
snake_case__ : Optional[Any] =list(set(a ) )
if len(a ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
snake_case__ : Tuple =np.array(a )
return target_ids
def lowercase__ ( self , a=None , a=None ):
snake_case__ : int ={}
if targets is not None:
snake_case__ : str =self.get_target_ids(a , a )
snake_case__ : Union[str, Any] =target_ids
if top_k is not None:
snake_case__ : Dict =top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self , a , *a , **a ):
snake_case__ : List[Any] =super().__call__(a , **a )
if isinstance(a , a ) and len(a ) == 1:
return outputs[0]
return outputs
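# A hedged usage sketch of the pipeline above (model name illustrative):
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="bert-base-uncased")
# unmasker("Paris is the capital of [MASK].", top_k=2)
# Each prediction is a dict with "score", "token", "token_str" and "sequence",
# matching the keys assembled in the postprocessing step above.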
| 385 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
a : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( _lowerCamelCase ):
def __init__( self : List[Any] , lowercase_ : Any , lowercase_ : Tuple ):
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self : Any , lowercase_ : int = 1 , lowercase_ : int = 100 , lowercase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ : Optional[float] = None , lowercase_ : bool = True , ):
if audio_length_in_s is None:
snake_case_ = self.unet.config.sample_size / self.unet.config.sample_rate
snake_case_ = audio_length_in_s * self.unet.config.sample_rate
snake_case_ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
F"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
F" {3 * down_scale_factor / self.unet.config.sample_rate}." )
snake_case_ = int(lowercase_ )
if sample_size % down_scale_factor != 0:
snake_case_ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
F"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
F" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
''' process.''' )
snake_case_ = int(lowercase_ )
snake_case_ = next(iter(self.unet.parameters() ) ).dtype
snake_case_ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
F"You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"
F" size of {batch_size}. Make sure the batch size matches the length of the generators." )
snake_case_ = randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
# set step values
self.scheduler.set_timesteps(lowercase_ , device=audio.device )
snake_case_ = self.scheduler.timesteps.to(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
snake_case_ = self.unet(lowercase_ , lowercase_ ).sample
# 2. compute previous image: x_t -> t_t-1
snake_case_ = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
snake_case_ = audio.clamp(-1 , 1 ).float().cpu().numpy()
snake_case_ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowercase_ )
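# A hedged arithmetic example of the padding above: with 100 requested samples
# and len(up_blocks) == 3 (down_scale_factor = 2 ** 3 = 8), 100 % 8 != 0, so
# sample_size is rounded up to (100 // 8 + 1) * 8 = 104 and the generated audio
# is cut back to the original 100 samples at the end.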
| 593 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
a : Dict = logging.get_logger(__name__)
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase=False ) -> int:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
snake_case_ = os.path.abspath(__UpperCAmelCase )
logger.info(F"Loading PyTorch weights from {pt_path}" )
snake_case_ = torch.load(__UpperCAmelCase, map_location='''cpu''' )
logger.info(F"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters." )
snake_case_ = convert_pytorch_state_dict_to_flax(__UpperCAmelCase, __UpperCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
snake_case_ = convert_pytorch_sharded_state_dict_to_flax(__UpperCAmelCase, __UpperCAmelCase )
return flax_state_dict
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(__UpperCAmelCase ) -> bool:
return len(set(__UpperCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
snake_case_ = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
snake_case_ = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
snake_case_ = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
snake_case_ = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
snake_case_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
snake_case_ = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
snake_case_ = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__UpperCAmelCase ):
snake_case_ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
snake_case_ = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
snake_case_ = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
snake_case_ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
snake_case_ = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
snake_case_ = pt_tuple_key[-2] + '''_v'''
if name is not None:
snake_case_ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
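# A hedged example of the renames above: a PyTorch conv entry
# ("encoder", "conv", "weight") with a 4-D tensor becomes the Flax key
# ("encoder", "conv", "kernel") with axes transposed (O, I, H, W) -> (H, W, I, O),
# while a 2-D linear "weight" becomes "kernel" with the matrix transposed.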
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
snake_case_ = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
snake_case_ = flax_model.params['''params''']
else:
snake_case_ = flax_model.params
snake_case_ = flatten_dict(__UpperCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case_ = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__UpperCAmelCase )
snake_case_ = {}
snake_case_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
snake_case_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case_ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
snake_case_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case_ ,snake_case_ = rename_key_and_reshape_tensor(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# add model prefix if necessary
snake_case_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
snake_case_ = jnp.asarray(__UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__UpperCAmelCase, __UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(__UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(__UpperCAmelCase )
return unflatten_dict(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
import torch
# Load the index
snake_case_ = {}
for shard_file in shard_filenames:
# load using msgpack utils
snake_case_ = torch.load(__UpperCAmelCase )
snake_case_ = {k: v.numpy() for k, v in pt_state_dict.items()}
snake_case_ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
snake_case_ = flax_model.params['''params''']
snake_case_ = flatten_dict(__UpperCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
snake_case_ = flax_model.params
snake_case_ = flatten_dict(__UpperCAmelCase )
snake_case_ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
snake_case_ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
snake_case_ = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
snake_case_ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = pt_tuple_key[1:]
# Correctly rename weight parameters
snake_case_ ,snake_case_ = rename_key_and_reshape_tensor(
__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase )
# add model prefix if necessary
snake_case_ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
snake_case_ = jnp.asarray(__UpperCAmelCase )
continue
if "var" in flax_key[-1]:
snake_case_ = jnp.asarray(__UpperCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__UpperCAmelCase, __UpperCAmelCase )
continue
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(__UpperCAmelCase )
else:
# also add unexpected weight so that warning is thrown
snake_case_ = jnp.asarray(__UpperCAmelCase )
return unflatten_dict(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Any:
'''simple docstring'''
snake_case_ = os.path.abspath(__UpperCAmelCase )
logger.info(F"Loading Flax weights from {flax_checkpoint_path}" )
# import correct flax class
snake_case_ = getattr(__UpperCAmelCase, '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__UpperCAmelCase, '''rb''' ) as state_f:
try:
snake_case_ = from_bytes(__UpperCAmelCase, state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"Unable to convert {flax_checkpoint_path} to Flax deserializable object. " )
return load_flax_weights_in_pytorch_model(__UpperCAmelCase, __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase ) -> Dict:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
snake_case_ = flatten_dict(jax.tree_util.tree_map(lambda __UpperCAmelCase : x.dtype == jnp.bfloataa, __UpperCAmelCase ) ).values()
if any(__UpperCAmelCase ):
# convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
snake_case_ = jax.tree_util.tree_map(
lambda __UpperCAmelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params, __UpperCAmelCase )
snake_case_ = flatten_dict(__UpperCAmelCase )
snake_case_ = pt_model.state_dict()
snake_case_ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
snake_case_ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
snake_case_ = []
snake_case_ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
snake_case_ = flax_key_tuple[0] == pt_model.base_model_prefix
snake_case_ = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
snake_case_ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
snake_case_ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__UpperCAmelCase ) not in pt_model_dict:
# conv layer
snake_case_ = flax_key_tuple[:-1] + ('''weight''',)
snake_case_ = jnp.transpose(__UpperCAmelCase, (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__UpperCAmelCase ) not in pt_model_dict:
# linear layer
snake_case_ = flax_key_tuple[:-1] + ('''weight''',)
snake_case_ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
snake_case_ = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
snake_case_ = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
snake_case_ = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
snake_case_ = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
snake_case_ = '''.'''.join(__UpperCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
snake_case_ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
snake_case_ = key.split('''.''' )
snake_case_ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
snake_case_ = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
snake_case_ = key_components[-2] + '''_v'''
if name is not None:
snake_case_ = key_components[:-3] + [name]
snake_case_ = '''.'''.join(__UpperCAmelCase )
snake_case_ = key
if flax_key in special_pt_names:
snake_case_ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
F"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}." )
else:
# add weight to pytorch dict
snake_case_ = np.asarray(__UpperCAmelCase ) if not isinstance(__UpperCAmelCase, np.ndarray ) else flax_tensor
snake_case_ = torch.from_numpy(__UpperCAmelCase )
# remove from missing keys
missing_keys.remove(__UpperCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__UpperCAmelCase )
pt_model.load_state_dict(__UpperCAmelCase )
# re-transform missing_keys to list
snake_case_ = list(__UpperCAmelCase )
if len(__UpperCAmelCase ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
F" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n" )
if len(__UpperCAmelCase ) > 0:
logger.warning(
F"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
F" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
''' use it for predictions and inference.''' )
else:
logger.warning(
F"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F"you can already use {pt_model.__class__.__name__} for predictions without further training." )
return pt_model
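# A minimal usage sketch for the loader above. This section corresponds to the
# Flax-to-PyTorch weight conversion in `transformers` (the enclosing function is
# `load_flax_weights_in_pytorch_model` in the original source); the model names
# below are illustrative assumptions, so the sketch is kept in comments:
#
#   from transformers import BertModel, FlaxBertModel
#
#   flax_model = FlaxBertModel.from_pretrained("bert-base-cased")
#   pt_model = BertModel(flax_model.config)
#   pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)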
| 593 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    """simple docstring"""
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)
    @classmethod
    def create( cls ):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput ):
    """simple docstring"""
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin ,ConfigMixin ):
    """simple docstring"""
    @property
    def has_state( self ):
        return True
    @register_to_config
    def __init__( self , sigma_min = 0.02 , sigma_max = 1_0_0 , s_noise = 1.0_07 , s_churn = 8_0 , s_min = 0.05 , s_max = 5_0 , ):
        pass
    def create_state( self ):
        return KarrasVeSchedulerState.create()
    def set_timesteps( self , state , num_inference_steps , shape = () ):
        timesteps = jnp.arange(0 , num_inference_steps )[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps , schedule=jnp.array(schedule , dtype=jnp.float32 ) , timesteps=timesteps , )
    def add_noise_to_input( self , state , sample , sigma , key , ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key , num=1 )
        eps = self.config.s_noise * random.normal(key=key , shape=sample.shape )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self , state , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
    def step_correct( self , state , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
    def add_noise( self , state , original_samples , noise , timesteps ):
        raise NotImplementedError()
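# A minimal sketch of one sampling loop with the scheduler above, assuming a Flax
# denoiser `unet_apply` mapping (sample, sigma) -> noise prediction; that model call
# and the initial `sample`/`key` values are assumptions of this sketch, so it is
# kept in comments:
#
#   scheduler = FlaxKarrasVeScheduler()
#   state = scheduler.set_timesteps(scheduler.create_state() , num_inference_steps=50 )
#   for t in state.timesteps:
#       sigma = state.schedule[t]
#       sigma_prev = state.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state , sample , sigma , key )
#       model_output = unet_apply(sample_hat , sigma_hat )  # hypothetical denoiser
#       output = scheduler.step(state , model_output , sigma_hat , sigma_prev , sample_hat )
#       sample = output.prev_sample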
| 479 |
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
_a : List[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest( TokenizerTesterMixin ,unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(_a )
        mask_token = AddedToken("""<mask>""" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_input_output_texts( self , tokenizer ):
        input_text = """this is a test"""
        output_text = """this is a test"""
        return input_text, output_text
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=2_0 , min_length=5 ):
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
def lowerCamelCase_ ( self ):
__lowerCamelCase = """<pad>"""
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase ) , UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase ) , UpperCAmelCase )
def lowerCamelCase_ ( self ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCAmelCase ) , 8_1 )
def lowerCamelCase_ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 7_9 )
def lowerCamelCase_ ( self ):
__lowerCamelCase = self.get_tokenizers(do_lower_case=UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = len(UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowerCamelCase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowerCamelCase = tokenizer.add_tokens(UpperCAmelCase )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = len(UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , 0 )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , len(UpperCAmelCase ) )
self.assertEqual(UpperCAmelCase , all_size + len(UpperCAmelCase ) )
__lowerCamelCase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCAmelCase )
self.assertGreaterEqual(len(UpperCAmelCase ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowerCamelCase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowerCamelCase = tokenizer.add_special_tokens(UpperCAmelCase )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = len(UpperCAmelCase )
self.assertNotEqual(UpperCAmelCase , 0 )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , len(UpperCAmelCase ) )
self.assertEqual(UpperCAmelCase , all_size_a + len(UpperCAmelCase ) )
__lowerCamelCase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCAmelCase )
self.assertGreaterEqual(len(UpperCAmelCase ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCamelCase_ ( self ):
pass
def lowerCamelCase_ ( self ):
pass
def lowerCamelCase_ ( self ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCAmelCase , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase )
# fmt: off
self.assertListEqual(UpperCAmelCase , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] )
# fmt: on
__lowerCamelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase )
self.assertListEqual(
UpperCAmelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCamelCase_ ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
__lowerCamelCase = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
__lowerCamelCase = {
"""input_ids""": [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCAmelCase , )
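# To exercise this suite directly (the test-file path below is an assumption based
# on the usual transformers repository layout, not stated in this file):
#
#   python -m pytest tests/models/speecht5/test_tokenization_speecht5.py -q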
| 479 | 1 |
def sum_digits( num ):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 1_0
        num //= 1_0
    return digit_sum
def solution( max_n = 1_0_0 ):
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(f'''{solution() = }''')
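# Worked example for the recurrence above: the continued fraction of e is
# [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], so the convergent numerators run
# 2, 3, 8, 11, 19, 87, ... With max_n=10 the numerator is 1457 and
# sum_digits(1457) == 1 + 4 + 5 + 7 == 17.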
| 206 |
def binary_exponentiation( a , n , mod ):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a , n - 1 , mod ) * a) % mod
    else:
        b = binary_exponentiation(a , n / 2 , mod )
        return (b * b) % mod
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
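# Worked example of the Fermat modular inverse used above, with small numbers:
# for p = 7 and b = 3, binary_exponentiation(3, 5, 7) == 3**5 % 7 == 5, and
# (3 * 5) % 7 == 1, so 5 is the modular inverse of 3 and any (x / 3) % 7 can be
# computed as (x * 5) % 7.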
| 206 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'marian'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self ,vocab_size=5_81_01 ,decoder_vocab_size=None ,max_position_embeddings=10_24 ,encoder_layers=12 ,encoder_ffn_dim=40_96 ,encoder_attention_heads=16 ,decoder_layers=12 ,decoder_ffn_dim=40_96 ,decoder_attention_heads=16 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,use_cache=True ,is_encoder_decoder=True ,activation_function="gelu" ,d_model=10_24 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.02 ,decoder_start_token_id=5_81_00 ,scale_embedding=False ,pad_token_id=5_81_00 ,eos_token_id=0 ,forced_eos_token_id=0 ,share_encoder_decoder_embeddings=True ,**kwargs ,):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,decoder_start_token_id=decoder_start_token_id ,forced_eos_token_id=forced_eos_token_id ,**kwargs ,)
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )
            if self.use_past:
                common_inputs["""decoder_input_ids"""] = {0: """batch"""}
                common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
            else:
                common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
                common_inputs["""decoder_attention_mask"""] = {0: """batch""", 1: """decoder_sequence"""}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs ,direction="""inputs""" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F'''past_key_values.{i}.key'''] = {0: """batch""", 2: """past_sequence + sequence"""}
                    common_inputs[F'''past_key_values.{i}.value'''] = {0: """batch""", 2: """past_sequence + sequence"""}
        else:
            common_inputs = OrderedDict(
                [
                    ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
                    ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
                    ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
                ] )
        return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast ,self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F'''present.{i}.key'''] = {0: """batch""", 2: """past_sequence + sequence"""}
                    common_outputs[F'''present.{i}.value'''] = {0: """batch""", 2: """past_sequence + sequence"""}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,):
        encoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer ,batch_size ,decoder_seq_length ,is_pair ,framework )
        decoder_inputs = {F'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs ,**decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
            batch , encoder_seq_length = common_inputs["""input_ids"""].shape
            decoder_seq_length = common_inputs["""decoder_input_ids"""].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["""decoder_attention_mask"""] = torch.cat(
                [common_inputs["""decoder_attention_mask"""], torch.ones(batch ,decoder_past_length )] ,dim=1 )
            common_inputs["""past_key_values"""] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers ,num_decoder_layers )
            max_num_layers = max(num_encoder_layers ,num_decoder_layers ) - min_num_layers
            remaining_side_name = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
            for _ in range(min_num_layers ,max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,):
        common_inputs = self._generate_dummy_inputs_for_encoder_and_decoder(
            tokenizer ,batch_size ,seq_length ,is_pair ,framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
            else:
                import torch
            batch , seqlen = common_inputs["""input_ids"""].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["""attention_mask"""].dtype
            common_inputs["""attention_mask"""] = torch.cat(
                [common_inputs["""attention_mask"""], torch.ones(batch ,past_key_values_length ,dtype=mask_dtype )] ,dim=1 )
            common_inputs["""past_key_values"""] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_encoder_and_decoder( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input ,return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self ,tokenizer ,batch_size = -1 ,seq_length = -1 ,is_pair = False ,framework = None ,):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer ,batch_size=batch_size ,seq_length=seq_length ,is_pair=is_pair ,framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self ,flattened_output ,name ,idx ,t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output ,name ,idx ,t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast ,self )._flatten_past_key_values_(
                flattened_output ,name ,idx ,t )
@property
    def atol_for_validation( self ):
        return 1E-4
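# A minimal export sketch using the config above; the `export` call follows the
# `transformers.onnx` API, but treat the exact signature as an assumption of this
# sketch rather than a guarantee for every library version:
#
#   from pathlib import Path
#   from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
#   from transformers.onnx import export
#
#   tokenizer = AutoTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-de')
#   model = AutoModelForSeq2SeqLM.from_pretrained('Helsinki-NLP/opus-mt-en-de')
#   onnx_config = MarianOnnxConfig(model.config, task='seq2seq-lm')
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path('model.onnx'))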
| 50 |
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =0
@slow
def UpperCamelCase_ ( self ):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(_A )
self.assertIsInstance(_A , _A )
# Check that tokenizer_type ≠ model_type
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A , config=_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A )
self.assertIsInstance(_A , _A )
@require_tokenizers
def UpperCamelCase_ ( self ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' )
self.assertIsInstance(_A , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with pytest.raises(_A ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
_SCREAMING_SNAKE_CASE =tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
if isinstance(_A , _A ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A )
else:
self.assertEqual(tokenizer.do_lower_case , _A )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def UpperCamelCase_ ( self ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
_SCREAMING_SNAKE_CASE =tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TOKENIZER_MAPPING.values()
_SCREAMING_SNAKE_CASE =[]
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_A )
@require_tokenizers
def UpperCamelCase_ ( self ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A )
@require_tokenizers
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A )
_SCREAMING_SNAKE_CASE ='''Hello, world. How are you?'''
_SCREAMING_SNAKE_CASE =tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A )
_SCREAMING_SNAKE_CASE =tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_A ) , _A )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_A , _A )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =get_tokenizer_config('''bert-base-cased''' )
_SCREAMING_SNAKE_CASE =config.pop('''_commit_hash''' , _A )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_A , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
_SCREAMING_SNAKE_CASE =get_tokenizer_config(_A )
self.assertDictEqual(_A , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
_SCREAMING_SNAKE_CASE =get_tokenizer_config(_A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
_SCREAMING_SNAKE_CASE =CustomTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def UpperCamelCase_ ( self ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
# Can register in two steps
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_A , slow_tokenizer_class=_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
            # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
_SCREAMING_SNAKE_CASE =BertTokenizerFast.from_pretrained(_A )
bert_tokenizer.save_pretrained(_A )
_SCREAMING_SNAKE_CASE =CustomTokenizerFast.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A , use_fast=_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaises(_A ):
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A , trust_remote_code=_A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def UpperCamelCase_ ( self ):
'''simple docstring'''
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : Tuple = False
class __UpperCAmelCase ( _lowerCamelCase ):
'''simple docstring'''
lowercase : List[str] = NewTokenizer
lowercase : Tuple = False
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
# If remote code is not set, the default is to use local
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''bert-base''' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
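# The registration pattern exercised repeatedly above, in isolation. CustomConfig and
# CustomTokenizer stand in for the test fixtures; real use needs your own config and
# tokenizer classes, so the sketch is kept in comments:
#
#   from transformers import AutoConfig, AutoTokenizer
#
#   AutoConfig.register("custom" , CustomConfig )
#   AutoTokenizer.register(CustomConfig , slow_tokenizer_class=CustomTokenizer )
#   tokenizer = AutoTokenizer.from_pretrained("path/to/custom/checkpoint" )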
| 255 | 0 |
import os
def solution( filename = "matrix.txt" ):
    """simple docstring"""
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as in_file:
        data = in_file.read()
    grid = [[int(cell ) for cell in row.split(',' )] for row in data.strip().splitlines()]
    n = len(grid[0] )
    dp = [[0 for i in range(n )] for j in range(n )]
    dp[0][0] = grid[0][0]
    for i in range(1 , n ):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1 , n ):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1 , n ):
        for j in range(1 , n ):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
    return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }')
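# Worked example of the DP above on the 2x2 grid [[1, 3], [2, 1]]:
# dp fills to [[1, 4], [3, 4]], so the minimal top-left to bottom-right path
# (moving only right and down) costs 4, via 1 -> 2 -> 1.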
| 702 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Dict = logging.get_logger(__name__)
lowerCAmelCase : str = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class TableTransformerConfig(PretrainedConfig ):
    model_type = '''table-transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        """simple docstring"""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ):
        """simple docstring"""
        return self.encoder_attention_heads
    @property
    def hidden_size( self ):
        """simple docstring"""
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('''1.11''')
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-5
    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
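# A minimal instantiation sketch for the config above (the detection-head class name
# is taken from the transformers model zoo and is an assumption of this sketch):
#
#   from transformers import TableTransformerConfig, TableTransformerForObjectDetection
#
#   config = TableTransformerConfig(num_queries=50 , d_model=256 )
#   model = TableTransformerForObjectDetection(config )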
| 353 | 0 |
'''simple docstring'''
def solution( numerator : int = 1, digit : int = 1000 ) -> int:
    """simple docstring"""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1 ):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
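# Worked example: with numerator=1 and digit=10, 1/7 = 0.(142857) has the longest
# recurring cycle (length 6) among denominators up to 10, so solution(1, 10)
# returns 7.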
| 98 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders( accelerator , batch_size = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader

# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811

def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
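
# Note on `find_executable_batch_size` (added for clarity): the decorator
# re-runs the wrapped function whenever it raises a CUDA out-of-memory style
# error, halving `batch_size` on each retry, until a size fits in memory (or
# an error is raised once the size reaches zero).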

def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)

if __name__ == "__main__":
    main()
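
# Typical launch (added; the script file name is an assumption -- adjust to
# wherever this example is saved):
#   accelerate launch memory.py --mixed_precision fp16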
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin

class MCTCTProcessor(ProcessorMixin):
    r"""
    Constructs a MCTCT processor which wraps a MCTCT feature extractor and a tokenizer into a single processor.
    """
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily sets the tokenizer as the active processor, e.g. for encoding labels."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
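
# Usage sketch (added; the checkpoint name is illustrative):
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   inputs = processor(audio=waveform, sampling_rate=16_000, text=transcript)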
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)

class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []

        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
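
# Example invocation (added; task and file names are illustrative):
#   transformers-cli run --task text-classification --input data.csv --column text --output out.json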
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}

class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)

class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> "OrderedDict[str, Mapping[int, str]]":
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
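
# Note (added): the defaults above correspond to the "sail/poolformer_s12"
# checkpoint listed in the archive map, so `PoolFormerConfig()` with no
# arguments should describe that architecture.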
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512,
        pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask

class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None,
        head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None,
        output_attentions=None, return_dict=None, output_hidden_states=None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
            position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2, last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states, attentions=outputs.attentions,
            )
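
# Context note (added, not from the original file): this module is used by
# diffusers' AltDiffusion pipeline as its text encoder; the pipeline reads
# `projection_state` as the prompt embedding.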
"""simple docstring"""
def _lowerCAmelCase(a : str ) -> str:
_SCREAMING_SNAKE_CASE =0
# if input_string is "aba" than new_input_string become "a|b|a"
_SCREAMING_SNAKE_CASE =''''''
_SCREAMING_SNAKE_CASE =''''''
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(a ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the starting and ending of previous furthest ending palindromic
# substring
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =0, 0
# length[i] shows the length of palindromic substring with center i
_SCREAMING_SNAKE_CASE =[1 for i in range(len(a ) )]
# for each character in new_string find corresponding palindromic string
_SCREAMING_SNAKE_CASE =0
for j in range(len(a ) ):
_SCREAMING_SNAKE_CASE =1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(a )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_SCREAMING_SNAKE_CASE =2 * k - 1
# does this string is ending after the previously explored end (that is r) ?
# if yes the update the new r to the last index of this
if j + k - 1 > r:
_SCREAMING_SNAKE_CASE =j - k + 1 # noqa: E741
_SCREAMING_SNAKE_CASE =j + k - 1
# update max_length and start position
if max_length < length[j]:
_SCREAMING_SNAKE_CASE =length[j]
_SCREAMING_SNAKE_CASE =j
# create that string
_SCREAMING_SNAKE_CASE =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
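
# Complexity note (added): the right boundary `r` only ever moves forward, so
# the inner while-loop does O(n) total work across all centers -- the whole
# scan is linear time.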

if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
UpperCAmelCase_ : Tuple = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
UpperCAmelCase_ : List[Any] = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def _lowerCAmelCase(a : Union[str, Any] ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE =numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=a )[0]

@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data

@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels

class _DataSet:
    """Container for a data set (deprecated)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`.  The seed arg provides convenient
        deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]

@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
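
# Usage sketch (added; the directory path is illustrative):
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   batch_images, batch_labels = mnist.train.next_batch(100)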
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor

class ConvNextModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4,
        hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True,
        intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_headmasking = False
    test_resize_embeddings = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def __UpperCAmelCase ( snake_case_ : str = "isbn/0140328726" ) -> dict:
"""simple docstring"""
_lowerCAmelCase = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
_lowerCAmelCase = F"""{olid} is not a valid Open Library olid"""
raise ValueError(snake_case_ )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def __UpperCAmelCase ( snake_case_ : dict ) -> dict:
"""simple docstring"""
_lowerCAmelCase = {
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
_lowerCAmelCase = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_lowerCAmelCase = [
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
_lowerCAmelCase = data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase = """, """.join(snake_case_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE : List[Any] = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (1_0, 1_3) or not isbn.isdigit():
print(F'Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.')
continue
print(F'\nSearching Open Library for ISBN: {isbn}...\n')
try:
SCREAMING_SNAKE_CASE : Tuple = summarize_book(get_openlibrary_data(F'isbn/{isbn}'))
print('''\n'''.join(F'{key}: {value}' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'Sorry, there are no results for ISBN: {isbn}.') | 156 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
    import jax
    import jax.numpy as jnp

@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
    import torch


if is_vision_available():
    import PIL
logger = logging.get_logger(__name__)

def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
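
# Worked example (added): with output_size=(384, 384), keep_aspect_ratio=True
# and multiple=32, a 480x640 input scales by 0.8 on both sides ("fit height")
# and maps to (384, 512) -- both multiples of 32.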

class DPTImageProcessor(BaseImageProcessor):
    r"""Constructs a DPT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False, ensure_multiple_of: int = 1,
        do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self, image: np.ndarray, size: Dict[str, int], keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1, resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None, ensure_multiple_of: int = None,
        resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None,
        do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        """Convert model outputs into semantic segmentation maps."""
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.roformer.modeling_flax_roformer import (
        FlaxRoFormerForMaskedLM,
        FlaxRoFormerForMultipleChoice,
        FlaxRoFormerForQuestionAnswering,
        FlaxRoFormerForSequenceClassification,
        FlaxRoFormerForTokenClassification,
        FlaxRoFormerModel,
    )

class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 407 |
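A minimal standalone sketch (not part of the test file above) of the same masked-LM forward pass the integration test performs; the checkpoint name is taken from the test, the rest is a plain Flax call.

import jax.numpy as jnp
from transformers import FlaxRoFormerForMaskedLM

model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
print(logits.shape)  # (1, 6, 50000), matching the expected shape asserted above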
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes over the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
| 407 | 1 |
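For reference, an example invocation of this distillation script. The flags all come from the parser above; the data and config paths are placeholders, and `--alpha_clm 0.0` is needed because sanity_checks() forbids a CLM weight when `--mlm` is set:

python train.py \
    --student_type distilbert \
    --student_config training_configs/distilbert-base-uncased.json \
    --teacher_type bert \
    --teacher_name bert-base-uncased \
    --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0 \
    --dump_path serialization_dir/my_first_distillation \
    --data_file data/binarized_text.bert-base-uncased.pickle \
    --token_counts data/token_counts.bert-base-uncased.pickle \
    --force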
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """
    Print the entropy of the single-character and two-character distributions of
    ``text``, and the difference between them.
    """
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """
    Convert the text into two dicts of counts: one for single characters and one
    for two-character strings.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
| 377 |
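Both printed values are Shannon entropies, H = -sum(p * log2(p)), first over single characters and then over character pairs; the third number is their difference, roughly the conditional entropy of the next character given the current one. An illustrative call:

single_counts, pair_counts = analyze_text("the quick brown fox jumps over the lazy dog")
calculate_prob("the quick brown fox jumps over the lazy dog")  # prints H1, H2 and H2 - H1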
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = "\nimport os\n"

IMPORT_IN_FUNCTION = "\ndef foo():\n    import os\n    return False\n"

DEEPLY_NESTED_IMPORT = "\ndef foo():\n    def bar():\n        if True:\n            import os\n        return False\n    return bar()\n"

TOP_LEVEL_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n"

TRY_IMPORT_IN_FUNCTION = "\nimport os\n\ndef foo():\n    try:\n        import bar\n    except ImportError:\n        raise ValueError()\n"

MULTIPLE_EXCEPTS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept (ImportError, AttributeError):\n    raise ValueError()\n"

EXCEPT_AS_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept ImportError as e:\n    raise ValueError()\n"

GENERIC_EXCEPT_IMPORT = "\nimport os\n\ntry:\n    import bar\nexcept:\n    raise ValueError()\n"

MULTILINE_TRY_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    raise ValueError()\n"

MULTILINE_BOTH_IMPORT = "\nimport os\n\ntry:\n    import bar\n    import baz\nexcept ImportError:\n    x = 1\n    raise ValueError()\n"

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 377 | 1 |
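An illustrative direct call outside pytest, assuming only the behaviour exercised above: get_imports reports the top-level modules a Python file imports, skipping imports guarded by try/except.

import os
import tempfile
from transformers.dynamic_module_utils import get_imports

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "example.py")
    with open(path, "w") as f:
        f.write("import os\n\ntry:\n    import bar\nexcept ImportError:\n    raise ValueError()\n")
    print(get_imports(path))  # ["os"], as the parametrized test above asserts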
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
    def get_vision_text_model(self, config, text_config):
        pass

    def prepare_config_and_inputs(self):
        pass

    def get_pretrained_model_and_inputs(self):
        pass

    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)
        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1e-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {"vision_model": vision_model, "text_model": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, fx_model, pt_model, inputs_dict):
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("vision_config")
        text_config = config_inputs_dict.pop("text_config")

        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }


@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 105 |
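A hedged sketch of the two-tower construction these tests exercise; the checkpoint names here are illustrative, while the API (from_vision_text_pretrained with the *_from_pt flags) is the one used above.

from transformers import FlaxVisionTextDualEncoderModel

model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
    "google/vit-base-patch16-224", "bert-base-uncased", vision_from_pt=True, text_from_pt=True
)
# image and text embeddings then live in a shared space of size model.config.projection_dim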
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    def __init__(self, parent, d_model=16, batch_size=13, prediction_length=7, context_length=14, label_length=10, cardinality=19, embedding_dimension=5, num_time_features=4, is_training=True, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, lags_sequence=[1, 2, 3, 4, 5], moving_average=25, autocorrelation_factor=5):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1

            if "trend" in outputs:
                correct_outlen += 1

            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned

            if "loss" in outputs:
                correct_outlen += 1

            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 450 | 0 |
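A hedged, simplified sketch of the series decomposition these tests exercise through model.decomposition_layer: a moving average extracts the trend and the residual is treated as the seasonal part. The pooling details below are an approximation for illustration, not the library's exact implementation.

import torch
import torch.nn.functional as F

x = torch.randn(1, 48)  # a toy univariate series
kernel = 25             # matches the moving_average default above
padded = F.pad(x.unsqueeze(1), ((kernel - 1) // 2, kernel // 2), mode="replicate")
trend = F.avg_pool1d(padded, kernel_size=kernel, stride=1).squeeze(1)
seasonal = x - trend    # x == trend + seasonal by construction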
def solution(n: int = 2000000) -> int:
    """Returns the sum of all the primes below n."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 627 |
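A quick check of the sieve above on a small input (the primes below 10 are 2, 3, 5 and 7):

assert solution(10) == 17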
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """
    Return the least cuboid size M such that the number of cuboids with largest
    dimension M and an integer shortest surface path exceeds ``limit``.
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""")
| 627 | 1 |
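Context for the formula above: for a cuboid with largest side M and the two other sides summing to s, the shortest surface path is sqrt(M**2 + s**2), and the inner expression counts the (a, b) pairs with a + b = s and 1 <= a <= b <= M. Values quoted in the Project Euler 86 statement give a quick check:

assert solution(2000) == 100  # M = 99 yields 1975 cuboids, M = 100 yields 2060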
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns its contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
| 475 |
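Illustrative usage under the definitions above: None falls back to the default prompts repo, and any string containing whitespace is treated as a literal prompt and returned unchanged. The agent name is a placeholder.

run_prompt = download_prompt(None, agent_name="my_agent")  # fetches run_prompt_template.txt
literal = download_prompt("Answer the task:\n<<task>>", agent_name="my_agent")  # returned as-is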
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # the underlying model isn't deterministic, so fix the seed
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # the underlying model isn't deterministic, so fix the seed
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 475 | 1 |
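Outside the test harness, the same tool can be driven directly; a short sketch using only the calls exercised above:

from transformers import load_tool

tts = load_tool("text-to-speech")
tts.setup()
waveform = tts("hello world").to_raw()  # 1-D audio tensor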
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Return one longest non-decreasing subsequence of ``array``."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
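A worked example, traced by hand against the function above:

assert longest_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80]) == [10, 22, 33, 41, 60, 80]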
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
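# Illustrative example (hypothetical filename): extract_label("images/Abyssinian_12.jpg")
# returns "Abyssinian", since the regex captures everything before the trailing "_<index>.jpg".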
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1

            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). "
        "Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
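
# Illustrative addition (not part of the original script): a minimal sketch of
# driving `training_function` directly (e.g. from a notebook) instead of the CLI
# entry point above. The `Namespace` fields mirror the argparse flags defined in
# `main()`; the `data_dir` value is a placeholder for a folder of
# `<class>_<index>.jpg` images.
def run_programmatic_example():
    from argparse import Namespace

    example_args = Namespace(
        data_dir="images",
        fp16=False,
        mixed_precision=None,
        cpu=True,
        checkpointing_steps=None,
        output_dir=".",
        resume_from_checkpoint=None,
        with_tracking=False,
        project_dir="logs",
    )
    example_config = {"lr": 3e-2, "num_epochs": 1, "seed": 42, "batch_size": 8, "image_size": 224}
    training_function(example_config, example_args)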
| 1 | 0 |
""" Tokenization class for model T5."""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
    },
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" T5 tokenizer (backed by HuggingFace's *tokenizers* library), based on Unigram.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        **kwargs,
    ):
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
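
# Illustrative addition (not part of the original module): a minimal sketch of the
# sentinel-token API defined above. Assumes the `transformers` package is installed
# and the public "t5-small" checkpoint is reachable.
def _t5_fast_tokenizer_demo():
    from transformers import T5TokenizerFast

    tok = T5TokenizerFast.from_pretrained("t5-small")
    # The default 100 extra ids yield sentinel tokens <extra_id_0> ... <extra_id_99>.
    print(len(tok.get_sentinel_tokens()))  # 100
    # build_inputs_with_special_tokens appends </s> (eos) to every sequence.
    ids = tok("Hello world")["input_ids"]
    print(ids[-1] == tok.eos_token_id)  # True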
| 370 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
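
# Illustrative addition (not part of the original __init__): with the `_LazyModule`
# registration above, heavy submodules are only imported on first attribute access,
# so a line like the one below pays the torch import cost only when it actually runs
# (assumes a `transformers` install that includes this model):
#
#     from transformers.models.chinese_clip import ChineseCLIPModel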
| 370 | 1 |
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
    is_apex_available,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."}
    )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."}
    )
    hidden_dropout: Optional[float] = field(
        default=0.1,
        metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        },
    )
    feat_proj_dropout: Optional[float] = field(
        default=0.1,
        metadata={"help": "The dropout probability for all 1D convolutional layers in feature extractor."},
    )
    mask_time_prob: Optional[float] = field(
        default=0.05,
        metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        },
    )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."})
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train+validation",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"],
        metadata={"help": "A list of characters to remove from the transcripts."},
    )
@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    """

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]

        batch = self.processor.pad(
            input_features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        batch["labels"] = labels

        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """
        Perform a training step on a batch of inputs.
        """
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        return loss.detach()
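
# Illustrative addition (not part of the original script): the "sum" reduction in
# `training_step` divides the summed CTC loss by the number of real label tokens,
# i.e. positions where labels >= 0 (the collator sets padding to -100). A sketch:
def _ctc_sum_reduction_demo():
    import torch

    labels = torch.tensor([[5, 9, -100, -100], [3, -100, -100, -100]])
    num_label_tokens = (labels >= 0).sum()  # 3 real tokens; padding excluded
    summed_loss = torch.tensor(6.0)  # stand-in for a summed CTC loss
    print(summed_loss / num_label_tokens)  # tensor(2.)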
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name)
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")

    # Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])

    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names, )
    vocab_test = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names, )

    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)

    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|", )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=True, return_attention_mask=True)
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), )

    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    resampler = torchaudio.transforms.Resample(48000, 16000)

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0])
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )

    # Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)

        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id

        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)

        wer = wer_metric.compute(predictions=pred_str, references=label_str)

        return {"wer": wer}

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None

        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
if __name__ == "__main__":
main() | 193 |
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """
    Extended Euclid's Algorithm: finds (x, y) such that a*x + b*y = gcd(a, b).

    >>> extended_euclid(10, 6)
    (-1, 2)
    >>> extended_euclid(7, 5)
    (-2, 3)
    """
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """
    Uses Extended Euclid to find a solution of x = r_1 (mod n_1) and x = r_2 (mod n_2),
    assuming gcd(n_1, n_2) = 1.

    >>> chinese_remainder_theorem(5, 1, 7, 3)
    31
    """
    (x, y) = extended_euclid(n_1, n_2)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """
    Finds the multiplicative inverse of a modulo n, assuming gcd(a, n) = 1.

    >>> invert_modulo(2, 5)
    3
    """
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n_1: int, r_1: int, n_2: int, r_2: int) -> int:
    """
    Same solution as above, computed with the modular-inverse helper.

    >>> chinese_remainder_theorem2(5, 1, 7, 3)
    31
    """
    x, y = invert_modulo(n_1, n_2), invert_modulo(n_2, n_1)
    m = n_1 * n_2
    n = r_2 * x * n_1 + r_1 * y * n_2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name='chinese_remainder_theorem', verbose=True)
    testmod(name='chinese_remainder_theorem2', verbose=True)
    testmod(name='invert_modulo', verbose=True)
    testmod(name='extended_euclid', verbose=True) | 193 | 1 |
"""
Prefix function (Knuth-Morris-Pratt): for each position i, the length of the
longest proper prefix of the string that is also a suffix ending at i.
"""


def prefix_function(input_string: str) -> list:
    """
    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    >>> prefix_function("asdasdad")
    [0, 0, 0, 1, 2, 3, 4, 0]
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """
    >>> longest_prefix("aabcdaabc")
    4
    >>> longest_prefix("dadad")
    3
    """
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
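
# Illustrative addition (not part of the original module): the prefix function is
# the core of Knuth-Morris-Pratt substring search. A minimal sketch: compute the
# prefix function of "pattern + sentinel + text" and report every position where
# it reaches len(pattern). The "\0" sentinel is assumed absent from both strings.
def kmp_search(pattern: str, text: str) -> list:
    combined = pattern + "\0" + text
    pi = prefix_function(combined)
    m = len(pattern)
    return [i - 2 * m for i in range(m + 1, len(combined)) if pi[i] == m]


# kmp_search("ab", "abab") -> [0, 2]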
| 476 |
"""
Build and simulate the Quantum Fourier Transform (QFT) circuit on n qubits,
then measure all qubits over a 10000-shot simulation.
"""

import math

import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute


def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: \
    {quantum_fourier_transform(3)}"
    )
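
# Illustrative addition (not part of the original module): the loops above imply a
# gate tally of n Hadamards, n*(n-1)/2 controlled-phase gates and n//2 swaps, so
# circuit size grows as O(n^2). A small helper that makes this explicit:
def _qft_gate_count(n: int) -> dict:
    return {"h": n, "cp": n * (n - 1) // 2, "swap": n // 2}


# _qft_gate_count(3) -> {"h": 3, "cp": 3, "swap": 1}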
| 476 | 1 |
"""simple docstring"""
import gc
import math
import unittest

import torch

from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
| 717 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
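
# Illustrative addition (not part of the original tests): the expected
# token_type_ids above encode Funnel's convention of giving <cls> its own
# segment id (2), while ordinary first/second segments use 0 and 1. A sketch,
# assuming the public "funnel-transformer/small" checkpoint is reachable:
#
#     from transformers import FunnelTokenizer
#     tok = FunnelTokenizer.from_pretrained("funnel-transformer/small")
#     print(tok("hello", "world")["token_type_ids"])  # starts with 2 for <cls>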
| 556 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30_522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
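
# Illustrative addition (not part of the original module): `attribute_map` lets
# BERT-style attribute names resolve to DistilBERT's own fields. Assumes the
# `transformers` package is installed:
def _distilbert_config_demo():
    from transformers import DistilBertConfig

    config = DistilBertConfig(n_layers=3, n_heads=4, dim=128, hidden_dim=512)
    print(config.num_hidden_layers)  # 3   (alias of n_layers)
    print(config.hidden_size)        # 128 (alias of dim)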
| 87 | """simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 656 | 0 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
is_python_no_less_than_3_10 = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool
@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})
@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None
class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42
@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)
@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])
@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])
@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)
@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])
if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
        parser = HfArgumentParser(BasicExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument("--bar", type=float, required=True)
        expected.add_argument("--baz", type=str, required=True)
        expected.add_argument("--flag", type=string_to_bool, default=False, const=True, nargs="?")
        self.argparsersEqual(parser, expected)

        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
    def test_with_default(self):
        parser = HfArgumentParser(WithDefaultExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=42, type=int)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        self.argparsersEqual(parser, expected)
    def test_with_default_bool(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=string_to_bool, default=False, const=True, nargs="?")
        expected.add_argument("--baz", type=string_to_bool, default=True, const=True, nargs="?")
        # A boolean no_* argument always has to come after its "default: True" regular counter-part
        # and its default must be set to False
        expected.add_argument("--no_baz", action="store_false", default=False, dest="baz")
        expected.add_argument("--opt", type=string_to_bool, default=None)

        dataclass_types = [WithDefaultBoolExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(WithDefaultBoolExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=False, baz=True, opt=None))

            args = parser.parse_args(["--foo", "--no_baz"])
            self.assertEqual(args, Namespace(foo=True, baz=False, opt=None))

            args = parser.parse_args(["--foo", "--baz"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=None))

            args = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"])
            self.assertEqual(args, Namespace(foo=True, baz=True, opt=True))

            args = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"])
            self.assertEqual(args, Namespace(foo=False, baz=False, opt=False))
    def test_with_enum(self):
        parser = HfArgumentParser(MixedTypeEnumExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=["titi", "toto", 42],
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")
        enum_ex = parser.parse_args_into_dataclasses([])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.toto)

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "titi"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.titi)

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
        enum_ex = parser.parse_args_into_dataclasses(["--foo", "42"])[0]
        self.assertEqual(enum_ex.foo, MixedTypeEnum.fourtytwo)
    def test_with_literal(self):
        @dataclass
        class LiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"

        parser = HfArgumentParser(LiteralExample)

        expected = argparse.ArgumentParser()
        expected.add_argument(
            "--foo",
            default="toto",
            choices=("titi", "toto", 42),
            type=make_choice_type_function(["titi", "toto", 42]),
        )
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(args.foo, "toto")

        args = parser.parse_args(["--foo", "titi"])
        self.assertEqual(args.foo, "titi")

        args = parser.parse_args(["--foo", "42"])
        self.assertEqual(args.foo, 42)
    def test_with_list(self):
        parser = HfArgumentParser(ListExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo_int", nargs="+", default=[], type=int)
        expected.add_argument("--bar_int", nargs="+", default=[1, 2, 3], type=int)
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        expected.add_argument("--foo_float", nargs="+", default=[0.1, 0.2, 0.3], type=float)
        self.argparsersEqual(parser, expected)

        args = parser.parse_args([])
        self.assertEqual(
            args,
            Namespace(foo_int=[], bar_int=[1, 2, 3], foo_str=["Hallo", "Bonjour", "Hello"], foo_float=[0.1, 0.2, 0.3]),
        )

        args = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split())
        self.assertEqual(args, Namespace(foo_int=[1], bar_int=[2, 3], foo_str=["a", "b", "c"], foo_float=[0.1, 0.7]))
    def test_with_optional(self):
        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", default=None, type=int)
        expected.add_argument("--bar", default=None, type=float, help="help message")
        expected.add_argument("--baz", default=None, type=str)
        expected.add_argument("--ces", nargs="+", default=[], type=str)
        expected.add_argument("--des", nargs="+", default=[], type=int)

        dataclass_types = [OptionalExample]
        if is_python_no_less_than_3_10:
            dataclass_types.append(OptionalExamplePep604)

        for dataclass_type in dataclass_types:
            parser = HfArgumentParser(dataclass_type)
            self.argparsersEqual(parser, expected)

            args = parser.parse_args([])
            self.assertEqual(args, Namespace(foo=None, bar=None, baz=None, ces=[], des=[]))

            args = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split())
            self.assertEqual(args, Namespace(foo=12, bar=3.14, baz="42", ces=["a", "b", "c"], des=[1, 2, 3]))
    def test_with_required(self):
        parser = HfArgumentParser(RequiredExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--required_list", nargs="+", type=int, required=True)
        expected.add_argument("--required_str", type=str, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        self.argparsersEqual(parser, expected)
    def test_with_string_literal_annotation(self):
        parser = HfArgumentParser(StringLiteralAnnotationExample)

        expected = argparse.ArgumentParser()
        expected.add_argument("--foo", type=int, required=True)
        expected.add_argument(
            "--required_enum",
            type=make_choice_type_function(["titi", "toto"]),
            choices=["titi", "toto"],
            required=True,
        )
        expected.add_argument("--opt", type=string_to_bool, default=None)
        expected.add_argument("--baz", default="toto", type=str, help="help message")
        expected.add_argument("--foo_str", nargs="+", default=["Hallo", "Bonjour", "Hello"], type=str)
        self.argparsersEqual(parser, expected)
    def test_parse_dict(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }

        parsed_args = parser.parse_dict(args_dict)[0]
        args = BasicExample(**args_dict)
        self.assertEqual(parsed_args, args)
    def test_parse_dict_extra_key(self):
        parser = HfArgumentParser(BasicExample)

        args_dict = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
            "extra": 42,
        }

        self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False)
    def test_parse_json(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_json = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_json")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".json", "w+") as f:
                json.dump(args_dict_for_json, f)
            parsed_args = parser.parse_json_file(Path(temp_local_path + ".json"))[0]

        args = BasicExample(**args_dict_for_json)
        self.assertEqual(parsed_args, args)
    def test_parse_yaml(self):
        parser = HfArgumentParser(BasicExample)

        args_dict_for_yaml = {
            "foo": 12,
            "bar": 3.14,
            "baz": "42",
            "flag": True,
        }
        with tempfile.TemporaryDirectory() as tmp_dir:
            temp_local_path = os.path.join(tmp_dir, "temp_yaml")
            os.mkdir(temp_local_path)
            with open(temp_local_path + ".yaml", "w+") as f:
                yaml.dump(args_dict_for_yaml, f)
            parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0]

        args = BasicExample(**args_dict_for_yaml)
        self.assertEqual(parsed_args, args)
    def test_integration_training_args(self):
        parser = HfArgumentParser(TrainingArguments)
        self.assertIsNotNone(parser)
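# Usage sketch (not part of the original test suite; `BasicExample` is the dataclass
# exercised above): HfArgumentParser maps dataclass fields onto CLI flags.
def _example_parse_basic():
    parser = HfArgumentParser(BasicExample)
    (example,) = parser.parse_args_into_dataclasses(
        ["--foo", "1", "--bar", "0.5", "--baz", "quux"], look_for_args_file=False
    )
    assert example.foo == 1 and example.flag is False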
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """
        Calculate y[n] for a single input sample x[n].
        """
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
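# Usage sketch (assumption: any object with a `process(sample)` method satisfies the
# FilterType protocol). An identity filter yields a flat 0 dB magnitude response.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


if __name__ == "__main__":
    show_frequency_response(IdentityFilter(), 48000)
    show_phase_response(IdentityFilter(), 48000)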
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
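# Usage sketch mirroring the integration test above (checkpoint download required):
#
#     model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")
#     input_ids = torch.tensor([[0, 31414, 232, 2]])
#     with torch.no_grad():
#         hidden_states = model(input_ids)[0]  # (batch, seq_len, hidden_size)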
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def str_eval(s: str) -> int:
    """Return the product of all digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product
def solution(n: str = N) -> int:
    """Find the thirteen adjacent digits in n with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product
if __name__ == "__main__":
print(f'''{solution() = }''')
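# Cross-check sketch (not in the original solution): a brute-force scan over every
# 13-digit window; it should agree with the skip-ahead version above.
def brute_force_solution(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))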
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
'''SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Swinv2ForImageClassification''',
'''Swinv2ForMaskedImageModeling''',
'''Swinv2Model''',
'''Swinv2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
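# Usage sketch: once the lazy module is installed in sys.modules, attribute access
# triggers the real import (assuming torch is available):
#
#     from transformers import Swinv2Config, Swinv2Model
#     model = Swinv2Model(Swinv2Config())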
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}


class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" MobileBERT tokenizer (backed by HuggingFace's *tokenizers* library), based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
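# Usage sketch (downloads the checkpoint named in the maps above):
#
#     tokenizer = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
#     ids = tokenizer("hello world")["input_ids"]  # wrapped in [CLS] ... [SEP]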
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
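# Usage sketch outside pytest ("data.txt" is a placeholder path; `text_path` and
# `tmp_path` above are pytest fixtures):
#
#     ds = TextDatasetReader("data.txt", cache_dir="cache").read()
#     assert ds.column_names == ["text"]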
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts sequence[start..end] (both inclusive) in-place.
    start defaults to 0 if not given.
    end defaults to len(sequence) - 1 if not given.
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
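# Quick demo: slowsort is in-place and deliberately inefficient (a "multiply and
# surrender" teaching algorithm), so keep inputs tiny.
if __name__ == "__main__":
    data = [5, 1, 4, 2, 3]
    slowsort(data)
    assert data == [1, 2, 3, 4, 5]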
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """
    Gives the distance between two points.
    """
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """
    Classifies the point using the KNN algorithm. The k closest points are found.
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
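# Accuracy sketch over the held-out split created above (illustrative helper, not
# part of the original script):
def holdout_accuracy(k: int = 5) -> float:
    predictions = [classifier(X_train, y_train, classes, point, k) for point in X_test]
    return float(np.mean([pred == classes[actual] for pred, actual in zip(predictions, y_test)]))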
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
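# numpy-only sanity check (numpy is not imported above, hence the local import):
# a black pixel maps to white.
def _sanity_check():
    import numpy as np

    sample = np.zeros((1, 1, 3), dtype=np.uint8)
    assert (convert_to_negative(sample) == 255).all()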
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder containing the model that was previously fine-pruned''',
)
    args = parser.parse_args()
main(args)
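# Example invocation (script file name and model path are placeholders):
#
#     python bertarize.py \
#         --pruning_method sigmoied_threshold \
#         --threshold 0.1 \
#         --model_name_or_path /path/to/fine_pruned_model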
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably
    with the other models in the library keeping the same API.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """
        Empty init weights function to ensure compatibility of the class in the library.
        """
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
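# Usage sketch (requires `timm`; "resnet18" stands in for any timm model name):
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps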
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(
        self,
        claim_vector: list[int],
        allocated_resources_table: list[list[int]],
        maximum_claim_table: list[list[int]],
    ) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Sum allocated resources column-wise, in line with the claim vector."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Resources still available for each resource type in the claim vector."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Per-process outstanding need: maximum claim minus current allocation."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Index control dictionary to track original process ids while the
        need list is mutated during execution of `main`."""
        return {self.__need().index(i): i for i in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm over the configured tables."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()

        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        """Properly align display of the algorithm's solution."""
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
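# Demo run with the tables defined at the top of the file; `describe=True` asks
# main() to pretty-print the resource tables before simulating.
if __name__ == "__main__":
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)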
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the matching Flax names, reshaping if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
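# Usage sketch (the model pair is a placeholder; any matching PyTorch/Flax models work):
#
#     pt_state_dict = pt_model.state_dict()
#     flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)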
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class _UpperCAmelCase( lowerCamelCase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def UpperCAmelCase ( self , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = AutoTokenizer.from_pretrained(__a)
_UpperCamelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
_UpperCamelCase = max(len(tokenizer.encode(__a)) for a in ARTICLES)
_UpperCamelCase = max(len(tokenizer.encode(__a)) for a in SUMMARIES)
_UpperCamelCase = 4
_UpperCamelCase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_UpperCamelCase , _UpperCamelCase = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
_UpperCamelCase = SeqaSeqDataset(
__a , data_dir=__a , type_path='''train''' , max_source_length=__a , max_target_length=__a , src_lang=__a , tgt_lang=__a , )
_UpperCamelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert isinstance(__a , __a)
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_UpperCamelCase = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id)
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED])
def UpperCAmelCase ( self , __a) -> Tuple:
'''simple docstring'''
_UpperCamelCase = AutoTokenizer.from_pretrained(__a)
_UpperCamelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
_UpperCamelCase = max(len(tokenizer.encode(__a)) for a in ARTICLES)
_UpperCamelCase = max(len(tokenizer.encode(__a)) for a in SUMMARIES)
_UpperCamelCase = 4
_UpperCamelCase = LegacySeqaSeqDataset(
__a , data_dir=__a , type_path='''train''' , max_source_length=20 , max_target_length=__a , )
_UpperCamelCase = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn)
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''')
_UpperCamelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
_UpperCamelCase = tmp_dir.joinpath('''train.source''').open().readlines()
_UpperCamelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
pack_data_dir(__a , __a , 1_28 , __a)
_UpperCamelCase = {x.name for x in tmp_dir.iterdir()}
_UpperCamelCase = {x.name for x in save_dir.iterdir()}
_UpperCamelCase = save_dir.joinpath('''train.source''').open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__a) < len(__a)
assert len(__a) == 1
assert len(packed_examples[0]) == sum(len(__a) for x in orig_examples)
assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.prod(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
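        # Why this holds (illustrative reasoning, not part of the original test):
        # the sortish sampler groups examples of similar length into the same
        # batch, so each batch is padded only to a length close to its own
        # examples, while the naive loader regularly mixes short and long
        # examples and pads the short ones heavily.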
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = SeqaSeqDataset(
            tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs, )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = SeqaSeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 19 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size, length):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
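        # Why temperature behaves this way (illustrative note, not in the
        # original test): dividing logits by T < 1 scales the gaps between
        # logits up before the softmax, concentrating probability on the peak,
        # while T > 1 shrinks the gaps and flattens the distribution. A uniform
        # row has no gaps to scale, so it is a fixed point of the warper.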
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
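        # Worked check of the first row (illustrative): sorted descending, the
        # probabilities are 0.5, 0.3, 0.1, 0.1 with cumulative sums 0.5, 0.8, ...
        # The cutoff top_p=0.8 is reached after two entries, so only 0.5 and 0.3
        # survive, which is exactly the expected row [0.3, 0, 0, 0.5].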
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 105 | 0 |
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 718 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
"tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ["BertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BertForMaskedLM",
"BertForMultipleChoice",
"BertForNextSentencePrediction",
"BertForPreTraining",
"BertForQuestionAnswering",
"BertForSequenceClassification",
"BertForTokenClassification",
"BertLayer",
"BertLMHeadModel",
"BertModel",
"BertPreTrainedModel",
"load_tf_weights_in_bert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBertEmbeddings",
"TFBertForMaskedLM",
"TFBertForMultipleChoice",
"TFBertForNextSentencePrediction",
"TFBertForPreTraining",
"TFBertForQuestionAnswering",
"TFBertForSequenceClassification",
"TFBertForTokenClassification",
"TFBertLMHeadModel",
"TFBertMainLayer",
"TFBertModel",
"TFBertPreTrainedModel",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : str = ["TFBertTokenizer"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"FlaxBertForCausalLM",
"FlaxBertForMaskedLM",
"FlaxBertForMultipleChoice",
"FlaxBertForNextSentencePrediction",
"FlaxBertForPreTraining",
"FlaxBertForQuestionAnswering",
"FlaxBertForSequenceClassification",
"FlaxBertForTokenClassification",
"FlaxBertModel",
"FlaxBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 | 0 |
"""simple docstring"""
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
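# Worked example of the index mirroring above (illustrative, not part of the
# original file): for s = "level", n = 5 and range(n // 2) = {0, 1}; i = 0 is
# compared with n - 0 - 1 = 4 ('l' == 'l') and i = 1 with n - 1 - 1 = 3
# ('e' == 'e'); the middle character at index 2 needs no partner.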
def is_palindrome_recursive(s: str) -> bool:
    # base case must be len <= 1: a 2-character core like "cd" still has to be compared
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"""{key:21} {value}""")
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""") | 174 |
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding['pixel_values'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding['pixel_values'].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
1,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding['pixel_values'].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(
encoding['labels'].shape , (
2,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
self.assertEqual(encoding['labels'].dtype , torch.long )
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 )
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
self.assertTrue(encoding['labels'].min().item() >= 0 )
self.assertTrue(encoding['labels'].max().item() <= 255 ) | 174 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
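    # Example invocation (illustrative values; the script name and all paths
    # below are assumptions, not taken from this file):
    #   python convert_s3prl_checkpoint.py \
    #       --base_model_name facebook/wav2vec2-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_downstream.ckpt \
    #       --model_dump_path ./converted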
| 713 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 474 | 0 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 78 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 343 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 2, i.e. a squared magnitude a * a + b * b greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
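# Illustrative sanity checks for get_distance (not part of the original file):
# for (x, y) = (0, 0) every iterate stays at 0, the loop never breaks, and the
# function returns 1.0 (treated as inside the set); for (x, y) = (2, 0) the
# first update already gives a = 6, so a*a + b*b = 36 > 4 and the function
# returns 0.0 (escapes immediately).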
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
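# Worked coordinate mapping (illustrative): with the defaults above, image_x = 0
# maps to figure_x = -0.6 + (0 - 0.5) * 3.2 = -2.2 (left edge), and as image_x
# approaches image_width the mapping approaches -0.6 + (1 - 0.5) * 3.2 = 1.0,
# so the rendered view spans roughly the real interval [-2.2, 1.0].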
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 701 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel | 678 | 0 |
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem with a single file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
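# Minimal usage sketch (illustrative, not part of the module; assumes a local
# "example.txt.gz" exists and that these filesystems are registered with
# fsspec under their `protocol` names, as the datasets library does):
#
#   fs = GzipFileSystem(fo="example.txt.gz")
#   print(fs.ls(""))              # a single entry named "example.txt"
#   data = fs.cat("example.txt")  # decompressed bytes of the archive member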
| 95 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class GLPNConfig(PretrainedConfig):
    """Configuration class to store the configuration of a GLPN model."""

    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 226 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 713 |
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
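# Illustrative values (Project Euler problem 5): solution(10) == 2520 and
# solution(20) == 232792560, the smallest numbers evenly divisible by every
# integer from 1 to 10 and from 1 to 20 respectively.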
if __name__ == "__main__":
print(F"{solution() = }")
| 423 | 0 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTaTokenizer
    rust_tokenizer_class = GPTaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = GPT2Tokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    # tokenizer has no padding token
    def test_padding_different_model_input_name(self, *args, **kwargs):
        pass
    def test_special_tokens_mask_input_pairs_and_bos_token(self):
        tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)]
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                sequence_0 = "Encode this."
                sequence_1 = "This one too please."
                encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
                encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
                encoded_sequence_dict = tokenizer.encode_plus(
                    sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True,
                )
                encoded_sequence_w_special = encoded_sequence_dict["input_ids"]
                special_tokens_mask = encoded_sequence_dict["special_tokens_mask"]
                self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special))

                filtered_sequence = [
                    (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special)
                ]
                filtered_sequence = [x for x in filtered_sequence if x is not None]
                self.assertEqual(encoded_sequence, filtered_sequence)
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
    def test_serialize_deserialize_fast_opt(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("test_opt")

        tokenizer = AutoTokenizer.from_pretrained("./test_opt")
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    def test_fast_slow_equivalence(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True)
        text = "A photo of a cat"

        ids = tokenizer.encode(text)
        # Same as above
        self.assertEqual(ids, [2, 250, 1345, 9, 10, 4758])

    @unittest.skip("This test is failing because of a bug in the fast tokenizer")
    def test_users_can_modify_bos(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True)
        tokenizer.bos_token = "bos"
        tokenizer.bos_token_id = tokenizer.get_vocab()["bos"]
        text = "A photo of a cat"

        ids = tokenizer.encode(text)
        # We changed the bos token
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
        tokenizer.save_pretrained("./tok")
        tokenizer = AutoTokenizer.from_pretrained("./tok")
        self.assertTrue(tokenizer.is_fast)
        ids = tokenizer.encode(text)
        self.assertEqual(ids, [31957, 250, 1345, 9, 10, 4758])
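
# Side note (added, not part of the test suite): "\u0120" in the fixtures above is the
# byte-level BPE marker "Ġ" that GPT-2 uses to encode a leading space, which is why
# add_prefix_space=True changes how the first word of "lower newer" is tokenized.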
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor ranging from 0 to 1, outputs a bit tensor ranging from -1 to 1."""
    device = x.device

    x = (x * 2_55).int().clamp(0, 2_55)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bits from -1 to 1, outputs an image tensor from 0 to 1."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 2_55).clamp(0.0, 1.0)
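
# Added round-trip check (not in the original pipeline): encoding to bits and decoding
# back is lossless up to the 8-bit quantization performed in decimal_to_bits.
if __name__ == "__main__":
    _img = torch.rand(1, 3, 4, 4)
    _restored = bits_to_decimal(decimal_to_bits(_img))
    assert torch.allclose((_img * 255).int().float() / 255, _restored, atol=1e-6)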
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE (DDIM variant with bit clipping)."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """Predict the sample at the previous timestep by reversing the SDE (DDPM variant with bit clipping)."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 2_56,
        width: Optional[int] = 2_56,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width), generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
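
# Hypothetical usage sketch (added; the names and sizes are assumptions, and a custom
# or trained UNet whose forward accepts (sample, timestep) is required, so this is
# left as comments rather than executable code):
#
#   unet = ...  # e.g. a UNet trained on 3 * BITS input/output channels
#   pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=256, width=256, num_inference_steps=50).images[0]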
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 20_48,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
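
# Added illustration (not part of the tokenizer): the fairseq alignment above reduces
# to shifting regular SentencePiece ids up by `fairseq_offset` while the four control
# tokens keep fixed slots; id 0 (SentencePiece's <unk>) maps to fairseq's slot 3.
def _demo_fairseq_id(spm_id: int, fairseq_offset: int = 1, unk_id: int = 3) -> int:
    return spm_id + fairseq_offset if spm_id else unk_id


assert _demo_fairseq_id(0) == 3 and _demo_fairseq_id(5) == 6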
def reverse_words(input_str: str) -> str:
    """
    Reverses words in a given string.

    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("I     Love          Python")
    'Python Love I'
    """
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1_006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''''',
'''i''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''se''',
'''.''',
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
@slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase_ :str = {'''input_ids''': [[17, 21_442, 270, 17, 10, 14_645, 318, 34, 17, 4_546, 3_145, 787, 13, 7_752, 22_018, 23, 21, 17, 4_546, 3_145, 787, 13, 3_352, 14_431, 13, 5_500, 11, 1_176, 580, 13, 16_819, 4_797, 23, 17, 10, 17_135, 658, 19, 457, 7_932, 13, 184, 19, 3_154, 17_135, 6_468, 19, 1_404, 12_269, 19, 4_229, 5_356, 16_264, 46, 19, 17, 20_545, 10_395, 9, 9, 9, 11, 28, 6_421, 9_531, 20_729, 17, 10, 353, 17_022, 11, 21, 6_421, 9_531, 16_949, 17, 10, 11_509, 753, 11, 33, 95, 2_421, 7_385, 956, 14_431, 2_626, 25, 842, 7_385, 4_836, 21, 1_429, 2_272, 9_855, 3_120, 161, 24_738, 19, 13_203, 658, 218, 787, 21, 430, 18_482, 847, 2_637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22_178, 27, 1_064, 22, 956, 13, 11_101, 1_429, 5_854, 24_313, 18_953, 40, 422, 24_366, 68, 1_758, 37, 10_483, 14_257, 31, 207, 263, 21, 203, 3_773, 25, 71, 9_735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2_049, 3_442, 17, 13_894, 3_380, 23, 95, 18, 17_634, 2_288, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        expected_encoding = UpperCAmelCase_
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="xlnet-base-cased", revision="c841166438c31ec7ca9a106dee7bb312b73ae511"
        )
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
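
# Context for test_long_path (added note): most filesystems cap a single path
# component at 255 bytes, so FileLock shortens over-long lock filenames; the
# assertions above check that the ".lock" suffix survives that truncation.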
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig(PretrainedConfig):
    model_type = "canine"

    def __init__(
        self,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_6_3_8_4,
        type_vocab_size=1_6,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=1_6_3_8_4,
        local_transformer_stride=1_2_8,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
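
# Minimal usage sketch (added; relies on the CanineConfig name restored above):
# the defaults mirror google/canine-s, e.g. a 768-dim deep stack with 4x character
# downsampling.
if __name__ == "__main__":
    config = CanineConfig()
    print(config.hidden_size, config.downsampling_rate)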
"""simple docstring"""
deps = {
"Pillow": "Pillow<10.0.0",
"accelerate": "accelerate>=0.20.3",
"av": "av==9.2.0",
"beautifulsoup4": "beautifulsoup4",
"black": "black~=23.1",
"codecarbon": "codecarbon==1.2.0",
"cookiecutter": "cookiecutter==1.7.3",
"dataclasses": "dataclasses",
"datasets": "datasets!=2.5.0",
"decord": "decord==0.6.0",
"deepspeed": "deepspeed>=0.9.3",
"diffusers": "diffusers",
"dill": "dill<0.3.5",
"evaluate": "evaluate>=0.2.0",
"fairscale": "fairscale>0.3",
"faiss-cpu": "faiss-cpu",
"fastapi": "fastapi",
"filelock": "filelock",
"flax": "flax>=0.4.1,<=0.7.0",
"ftfy": "ftfy",
"fugashi": "fugashi>=1.0",
"GitPython": "GitPython<3.1.19",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
"importlib_metadata": "importlib_metadata",
"ipadic": "ipadic>=1.0.0,<2.0",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
"jaxlib": "jaxlib>=0.1.65,<=0.4.13",
"jieba": "jieba",
"kenlm": "kenlm",
"keras-nlp": "keras-nlp>=0.3.1",
"librosa": "librosa",
"nltk": "nltk",
"natten": "natten>=0.14.6",
"numpy": "numpy>=1.17",
"onnxconverter-common": "onnxconverter-common",
"onnxruntime-tools": "onnxruntime-tools>=1.4.2",
"onnxruntime": "onnxruntime>=1.4.0",
"opencv-python": "opencv-python",
"optuna": "optuna",
"optax": "optax>=0.0.8,<=0.1.4",
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic<2",
"pytest": "pytest>=7.2.0",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"python": "python>=3.8.0",
"ray[tune]": "ray[tune]",
"regex": "regex!=2019.12.17",
"requests": "requests",
"rhoknp": "rhoknp>=1.1.0,<1.3.1",
"rjieba": "rjieba",
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"ruff": "ruff>=0.0.241,<=0.0.259",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.3.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"sigopt": "sigopt",
"starlette": "starlette",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
"tensorflow": "tensorflow>=2.6,<2.14",
"tensorflow-text": "tensorflow-text<2.14",
"tf2onnx": "tf2onnx",
"timeout-decorator": "timeout-decorator",
"timm": "timm",
"tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
"torch": "torch>=1.9,!=1.12.0",
"torchaudio": "torchaudio",
"torchvision": "torchvision",
"pyctcdecode": "pyctcdecode>=0.4.0",
"tqdm": "tqdm>=4.27",
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"urllib3": "urllib3<2.0.0",
"uvicorn": "uvicorn",
}
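
# Added helper sketch (not in the original module): the table is meant for exact
# lookups when pinning requirements, e.g.:
def _pinned(package: str) -> str:
    return deps[package]


assert _pinned("numpy") == "numpy>=1.17"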
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
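
# Quick sketch (added; relies on the names restored above): `attribute_map` lets
# generic code read `config.hidden_size` even though this config stores `d_model`.
if __name__ == "__main__":
    config = Speech2Text2Config()
    assert config.hidden_size == config.d_model == 256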
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or passive"
            " of cast to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string)."
        )

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
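
# Added round-trip check: reversing the transform of any non-empty string recovers it.
assert reverse_bwt(**bwt_transform("^BANANA")) == "^BANANA"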
if __name__ == "__main__":
UpperCamelCase__ ='Provide a string that I will generate its BWT transform: '
UpperCamelCase__ =input(entry_msg).strip()
UpperCamelCase__ =bwt_transform(s)
print(
f"Burrows Wheeler transform for string '{s}' results "
f"in '{result['bwt_string']}'"
)
UpperCamelCase__ =reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
f"we get original string '{original_string}'"
) | 249 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
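
# Usage sketch (added; the checkpoint id is an assumption — any CLIP-style model
# works through the high-level pipeline factory):
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog", "car"])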
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
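
# Note on the size accounting above (added): dtype_byte_size gives bytes per element,
# so numel() * dtype_byte_size(dtype) is a tensor's storage footprint — e.g. a
# (1024, 1024) float32 tensor contributes 1024 * 1024 * 4 bytes (4 MiB) to total_size.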
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."
def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """
    Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
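
# Example of the resulting file on a CPU-only machine (added; values follow from the
# final `else` branch above, serialized by ClusterConfig.to_json_file):
#
#   {"compute_environment": "LOCAL_MACHINE", "mixed_precision": "no",
#    "use_cpu": true, "num_processes": 1, "distributed_type": "NO"}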
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """
    Simulated annealing: starting from `search_prob`, repeatedly move to a
    random neighbor, always accepting improvements and accepting worse states
    with probability e**(change / temperature), while the temperature decays
    by `rate_of_decrease` each iteration.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
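

# The acceptance rule in numbers: a downhill move with change = -2 survives
# with probability e**(-2 / current_temp) — about 0.98 at temperature 100 but
# only about 0.14 at temperature 1, which is how the search cools from broad
# exploration into plain hill climbing.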
if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a scikit-learn Bunch into its feature matrix and target vector."""
    return (data["data"], data["target"])
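

# For reference: load_iris() yields a (150, 4) feature matrix and a (150,)
# integer target vector, so train_test_split below holds out a quarter of the
# 150 samples for testing.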
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}
        trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
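        # Second pass: rerun evaluation and prediction with eval_accumulation_steps=2
        # so the chunked host-side accumulation code path is exercised as well.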
        trainer.args.eval_accumulation_steps = 2
        metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
        p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
        trainer.args.eval_accumulation_steps = None
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True

        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output)
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list used for 0-1 breadth-first search."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.')

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).')

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.')

        return distances[finish_vertex]
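

# Why a plain deque suffices here: relaxing a 0-weight edge pushes the vertex
# to the *front* of the deque and a 1-weight edge to the *back*, so vertices
# are popped in non-decreasing distance order — Dijkstra's invariant at plain
# BFS cost, O(V + E), with no priority queue needed.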
if __name__ == "__main__":
import doctest
    doctest.testmod()
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin


if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
                toks.append((i, tok))
            except UnicodeDecodeError:
                # ids that do not decode to valid UTF-8 are skipped
                pass

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
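
        # The magic numbers above follow from ByT5's byte-level scheme: each
        # UTF-8 byte b maps to id b + 3 (three ids are reserved for special
        # tokens), so ord("U") == 85 becomes 88, and the three-byte "€"
        # (0xE2 0x82 0xAC) becomes the triple 229, 133, 175.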
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character
        # strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Project Euler 493: expected number of distinct colours among
    `num_picked` balls drawn from an urn of 70 balls, 10 of each of 7 colours."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)

    result = NUM_COLOURS * (1 - missing_colour / total)

    return f"{result:.9f}"
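

# Why this works: by linearity of expectation, the expected number of colours
# present equals NUM_COLOURS times the probability that one fixed colour shows
# up at least once, i.e. 7 * (1 - C(60, 20) / C(70, 20)).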
if __name__ == "__main__":
print(solution(20))
# Number of characters in the alphabet, used as the hash base
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue

        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue

        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
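

# Rolling-hash arithmetic in miniature: hashing "ab" gives
# (ord("a") * 256 + ord("b")) % modulus; sliding the window to "bc" first
# subtracts ord("a") * modulus_power (256 for a length-2 window), multiplies
# by 256, then adds ord("c") — constant work per shift instead of rehashing.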
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, the number of ways `dice_number` dice
    with `sides_number` sides can roll that total."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies
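

# Small sanity check: total_frequency_distribution(sides_number=2, dice_number=2)
# returns [0, 0, 1, 2, 1] — one way to roll 2, two ways to roll 3, one to roll 4.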
def solution() -> float:
    """Project Euler 205: probability that Peter's nine 4-sided dice beat
    Colin's six 6-sided dice, rounded to 7 decimal places."""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)

    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
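

# Illustrative input shape (an invented example, inferred from the loop above):
# the JSON maps a benchmark path to {metric: {"new": ..., "old": ..., "diff": ...}},
# e.g. {"benchmarks/load.json": {"load_time": {"new": 1.2, "old": 1.5, "diff": -0.3}}}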
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
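

# Note on the decoder mask above: the first decoder position is forced to 1
# because this tester sets decoder_start_token_id equal to pad_token_id, so a
# plain `!= pad` comparison would incorrectly mask out the start token.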
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n, via a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
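

# Quick check: solution(10) == 17, the sum of the primes 2, 3, 5 and 7.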
if __name__ == "__main__":
print(F"""{solution() = }""")
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_CITATION = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_DESCRIPTION = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_KWARGS_DESCRIPTION = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    'meteor': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric('meteor')\n    >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n    >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results[\"meteor\"], 4))\n    0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )
    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")
    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
from math import isqrt


def calculate_prime_numbers(max_number: int) -> list:
    """Return all primes below max_number via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """Count the numbers below max_number with exactly two prime factors
    (Project Euler 187), using a two-pointer sweep over the primes."""
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
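# Worked example: the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26, so solution(30) == 10.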
if __name__ == "__main__":
print(f'{solution() = }')
| 484 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
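# Note on the lazy-module pattern above (a sketch, not part of the original
# file; the exact module path is illustrative): replacing the package module in
# sys.modules defers the heavy imports until a name is first touched.
# import transformers.models.mmbt as mmbt  # cheap: only the lazy stub exists
# mmbt.MMBTConfig                          # first access imports configuration_mmbt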
| 484 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Read the Visual Genome class and attribute vocabularies from disk."""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_url):
    """Load a pickled Detectron-style checkpoint and convert its arrays to tensors."""
    r = OrderedDict()
    with open(ckp_url, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    """Nested configuration with dot-path attribute access, loadable from YAML."""

    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """Compare a tensor against the reference activations saved in dump.pt."""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False]) / len(n1.flatten()) * 100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")  # quirk kept from the demo: success is signaled by raising
# Hugging face functions below
def is_remote_url(url_or_filename):
    """Return True when the argument parses as an http(s) URL."""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True):
    """Build the download URL for a file of a hub model, using the legacy flat
    layout for un-namespaced model ids."""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
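# The two layouts produced above, with illustrative identifiers:
# hf_bucket_url("bert-base-uncased", "config.yaml")
#   -> "https://cdn.huggingface.co/bert-base-uncased-config.yaml"
# hf_bucket_url("some-user/some-model", "config.yaml", use_cdn=False)
#   -> "https://s3.amazonaws.com/models.huggingface.co/bert/some-user/some-model/config.yaml"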
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream `url` into `temp_file`, optionally resuming a partial download."""
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """Resolve `url` to a local file inside `cache_dir`, downloading it if needed."""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    """Derive a deterministic cache filename from a URL and optional ETag."""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
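# The cache key is deterministic for a given (url, etag) pair; for example
# (hex digests elided, values illustrative):
# url_to_filename("https://example.com/model.h5", etag='"abc"')
#   -> "<sha256(url)>.<sha256(etag)>.h5"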
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    """Return a local path for a URL (downloading through the cache) or an
    existing local file, optionally extracting zip/tar archives."""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted

    return output_path
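# Usage sketch (URLs and paths are illustrative):
# local_file = cached_path("https://cdn.huggingface.co/some-model-config.yaml")  # downloads once, then reuses the cache
# local_file = cached_path("./config.yaml")                                      # existing local paths pass through
# out_dir = cached_path("./weights.zip", extract_compressed_file=True)           # unpacks into "weights-zip-extracted"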
def lowerCAmelCase ( UpperCamelCase_: str , UpperCamelCase_: str="," ) -> Tuple:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ):
with open(UpperCamelCase_ ) as f:
_a = eval(f.read() )
else:
_a = requests.get(UpperCamelCase_ )
try:
_a = requests.json()
except Exception:
_a = req.content.decode()
assert data is not None, "could not connect"
try:
_a = eval(UpperCamelCase_ )
except Exception:
_a = data.split("\n" )
req.close()
return data
def get_image_from_url(url):
    """Download an image and return it as a numpy array."""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """Download (if needed) a pickled Faster R-CNN checkpoint and remap its keys."""
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    """Print the location of the accompanying demo notebook."""
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def lowerCAmelCase ( UpperCamelCase_: List[str] , UpperCamelCase_: List[str]="RGB" ) -> List[Any]:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ):
_a = cva.imread(UpperCamelCase_ )
else:
_a = get_image_from_url(UpperCamelCase_ )
assert img is not None, f'''could not connect to: {im}'''
_a = cva.cvtColor(UpperCamelCase_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_a = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    """Yield successive `batch`-sized slices of `images`."""
    return (images[i : i + batch] for i in range(0, len(images), batch))
| 612 | 0 |
def solution(min_total: int = 10**12) -> int:
    """Project Euler 100: smallest number of blue discs in a box of more than
    min_total discs such that P(two blue) == 1/2, via the Pell-like recurrence
    on numerator = 2*total - 1 and denominator = 2*blue - 1."""
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2
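# Worked example: the first arrangement with more than 100 discs in total is
# 85 blue out of 120 (85/120 * 84/119 == 1/2), so solution(100) == 85.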
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 687 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
a = """\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
author = \"Lin, Chin-Yew and
Och, Franz Josef\",
booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
month = \"aug 23{--}aug 27\",
year = \"2004\",
address = \"Geneva, Switzerland\",
publisher = \"COLING\",
url = \"https://www.aclweb.org/anthology/C04-1072\",
pages = \"501--507\",
}
"""
a = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,
the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
"""
a = """
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
'bleu': bleu score,
'precisions': geometric mean of n-gram precisions,
'brevity_penalty': brevity penalty,
'length_ratio': ratio of lengths,
'translation_length': translation_length,
'reference_length': reference_length
Examples:
>>> predictions = [
... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample
... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample
... ]
>>> references = [
... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)
... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric(\"bleu\")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results[\"bleu\"])
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    """Corpus-level BLEU, wrapping the TensorFlow NMT reference implementation."""

    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Delegate to the reference implementation and unpack its score tuple."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
| 687 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """Output of UNet1DModel; `sample` holds the (batch, channels, length) tensor."""

    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    r"""A 1-D UNet that denoises fixed-length signals: time embedding, then
    down, mid, up and optional out blocks as registered in the config below."""

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
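# Minimal inference sketch for the model above (a sketch, not part of the
# original file; channel and length choices are illustrative, and the length
# must be divisible by the number of downsampling stages):
# import torch
# model = UNet1DModel(in_channels=2, out_channels=2, block_out_channels=(32, 32, 64))
# noisy = torch.randn(1, 2, 256)                # (batch, channels, length)
# denoised = model(noisy, timestep=10).sample   # same shape as the input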
| 40 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
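# End-to-end inference sketch for the checkpoint exercised above (a sketch,
# not part of the test suite; the post-processing call assumes a recent
# transformers release):
# from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor
# name = "facebook/mask2former-swin-small-coco-instance"
# processor = Mask2FormerImageProcessor.from_pretrained(name)
# model = Mask2FormerForUniversalSegmentation.from_pretrained(name)
# inputs = processor(images=prepare_img(), return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**inputs)
# result = processor.post_process_instance_segmentation(outputs)[0]  # "segmentation" + "segments_info"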
| 40 | 1 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Project Euler 71: numerator of the largest fraction strictly to the left
    of numerator/denominator whose denominator is at most `limit`."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            # current_numerator/current_denominator equals the target exactly; step left.
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
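# Worked example: among fractions with denominator <= 8, the closest one to the
# left of 3/7 is 2/5, so solution(3, 7, 8) == 2.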
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 666 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=30 , _UpperCamelCase=400 , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=[0.5, 0.5, 0.5] , _UpperCamelCase=True , _UpperCamelCase=1 / 255 , _UpperCamelCase=True , )-> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_A = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_normalize
_A = image_mean
_A = image_std
_A = do_rescale
_A = rescale_factor
_A = do_pad
def UpperCamelCase ( self )-> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCamelCase ( self , _UpperCamelCase , _UpperCamelCase=False )-> List[str]:
if not batched:
_A = image_inputs[0]
if isinstance(_UpperCamelCase , Image.Image ):
_A , _A = image.size
else:
_A , _A = image.shape[1], image.shape[2]
if w < h:
_A = int(self.size['shortest_edge'] * h / w )
_A = self.size['shortest_edge']
elif w > h:
_A = self.size['shortest_edge']
_A = int(self.size['shortest_edge'] * w / h )
else:
_A = self.size['shortest_edge']
_A = self.size['shortest_edge']
else:
_A = []
for image in image_inputs:
_A , _A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_A = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[0] )[0]
_A = max(_UpperCamelCase , key=lambda _UpperCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_ ( UpperCAmelCase , unittest.TestCase ):
__UpperCAmelCase =DeformableDetrImageProcessor if is_vision_available() else None
def UpperCamelCase ( self )-> Optional[int]:
_A = DeformableDetrImageProcessingTester(self )
@property
def UpperCamelCase ( self )-> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self )-> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'image_std' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_rescale' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'do_pad' ) )
self.assertTrue(hasattr(_UpperCamelCase , 'size' ) )
def UpperCamelCase ( self )-> Any:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_UpperCamelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _UpperCamelCase )
def UpperCamelCase ( self )-> Optional[Any]:
pass
def UpperCamelCase ( self )-> Optional[Any]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
_A = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self )-> List[str]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase ( self )-> Dict:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCamelCase , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(_UpperCamelCase , return_tensors='pt' ).pixel_values
_A , _A = self.image_processor_tester.get_expected_values(_UpperCamelCase , batched=_UpperCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())
        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DeformableDetrImageProcessor(format='coco_panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))
| 292 | 0 |
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Checks whether ``number`` is prime using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Returns the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below ``ratio`` (Project Euler problem 58)."""
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
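
# A quick, hedged sanity check (added for illustration; not part of the original
# sample): solution(0.5) should return 11, the first spiral side length where the
# share of primes on the diagonals drops below 50%.
if __name__ == "__main__":
    assert is_prime(29) and not is_prime(27)
    print(solution(0.5))  # 11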
| 708 |
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
lowercase_ = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained('hf-internal-testing/tiny-random-vit')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json')

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained('hf-internal-testing/stable-diffusion-all-variants')

        config = AutoImageProcessor.from_pretrained(
            'hf-internal-testing/stable-diffusion-all-variants', subfolder='feature_extractor')
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id='test-image-processor')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-image-processor-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-image-processor')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('test-image-processor', use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='test-image-processor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='test-image-processor', push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f'{USER}/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub('valid_org/test-image-processor', use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-image-processor')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id='valid_org/test-image-processor-org', push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained('valid_org/test-image-processor-org')
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub('test-dynamic-image-processor', use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {'AutoImageProcessor': 'custom_image_processing.CustomImageProcessor'})

        new_image_processor = AutoImageProcessor.from_pretrained(
            f'{USER}/test-dynamic-image-processor', trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, 'CustomImageProcessor')
| 352 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowerCAmelCase : Optional[int] = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
__lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 509 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=51_865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50_257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50_256, bos_token_id=50_256, eos_token_id=50_256, suppress_tokens=None, begin_suppress_tokens=[220, 50_256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs)
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=22_050, time_duration=5.0, frequency=220):
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency)

        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 509 | 1 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 691 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 691 | 1 |
def exchange_sort(numbers: list) -> list:
    """Sorts ``numbers`` in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
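
# Hedged usage sketch (added; not in the original sample): exchange sort is
# O(n**2), so it is only suitable for small inputs.
# >>> exchange_sort([5, 1, 4, 2])
# [1, 2, 4, 5]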
| 380 |
def bin_to_octal(bin_string: str) -> str:
    """Converts a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
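
# Hedged usage sketch (added; not in the original sample):
# >>> bin_to_octal("11010")
# '32'
# which matches oct(0b11010) == '0o32'.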
| 380 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clipseg""": [
"""CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPSegConfig""",
"""CLIPSegTextConfig""",
"""CLIPSegVisionConfig""",
],
"""processing_clipseg""": ["""CLIPSegProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"""CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPSegModel""",
"""CLIPSegPreTrainedModel""",
"""CLIPSegTextModel""",
"""CLIPSegVisionModel""",
"""CLIPSegForImageSegmentation""",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 601 |
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time: list[int], burst_time: list[int], no_of_processes: int) -> list[int]:
    """Calculate the waiting times of processes scheduled shortest-job-first."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes

    # Initialize remaining_time to waiting_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # A process whose arrival time has passed \
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process is executed.

    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1

    return waiting_time


def calculate_turnaroundtime(burst_time: list[int], no_of_processes: int, waiting_time: list[int]) -> list[int]:
    """Calculate the turnaround times: burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 601 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    downstream_dict = checkpoint['Downstream']

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 76 |
from collections.abc import Callable
import numpy as np
def heun_method(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Numerically solve y' = ode_func(x, y) with Heun's (improved Euler) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size
    return y
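
# Hedged usage sketch (added; not in the original sample): with f(x, y) = y and
# y(0) = 1, Heun's method approximates e**x, so y(1) should be close to 2.71828.
def _demo_heun() -> float:  # hypothetical helper for illustration
    return heun_method(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)[-1]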
if __name__ == "__main__":
import doctest
doctest.testmod() | 106 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_blenderbot_small': [
'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotSmallConfig',
'BlenderbotSmallOnnxConfig',
],
'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotSmallForCausalLM',
'BlenderbotSmallForConditionalGeneration',
'BlenderbotSmallModel',
'BlenderbotSmallPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
'TFBlenderbotSmallForConditionalGeneration',
'TFBlenderbotSmallModel',
'TFBlenderbotSmallPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
'FlaxBlenderbotSmallForConditionalGeneration',
'FlaxBlenderbotSmallModel',
'FlaxBlenderbotSmallPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 438 | '''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Reads a file as bytes and returns its contents as a string of bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Extends the lexicon with curr_string + "0" and curr_string + "1" entries."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compresses the given string of bits using the Lempel-Ziv algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepends the length of the original file (in bits) to the compressed string."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Writes the given string of bits to a file, padded to whole bytes."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Reads, compresses and writes the source file to the destination."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
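
# Hedged note (added; not in the original sample): the script is invoked as
#   python lempel_ziv_compress.py <source_file> <destination_file>
# and a matching decompressor must strip the length header written by
# add_file_length before decoding.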
| 438 | 1 |
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flips both qubits to |1> and measures them."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
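
# Hedged note (added; not in the original sample): both qubits are flipped to
# |1> before measurement, so the histogram should read {'11': 1000}.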
| 55 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compares and swaps array[index1] and array[index2] according to direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merges a bitonic sequence into sorted order."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sorts array[low : low + length] ascending if direction is 1, else descending."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print('\nSorted array in ascending order is: ', end='')
    print(*unsorted, sep=', ')

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print('Sorted array in descending order is: ', end='')
    print(*unsorted, sep=', ')
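
# Hedged note (added; not in the original sample): bitonic sort only works when
# the input length is a power of two, e.g.
#   data = [3, 7, 4, 8]; bitonic_sort(data, 0, len(data), 1)  # -> [3, 4, 7, 8]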
| 529 | 0 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main() | 714 |
def solution(n: int = 6008_5147_5143) -> int:
    """Returns the largest prime factor of ``n`` (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    max_number = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
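
# Hedged usage sketch (added; not in the original sample):
# >>> solution(13195)
# 29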
if __name__ == "__main__":
print(f'''{solution() = }''') | 531 | 0 |
"""simple docstring"""
def UpperCamelCase ( _lowerCAmelCase : str, _lowerCAmelCase : str = " " ) -> list:
_UpperCAmelCase : Optional[int] = []
_UpperCAmelCase : Dict = 0
for index, char in enumerate(_lowerCAmelCase ):
if char == separator:
split_words.append(string[last_index:index] )
_UpperCAmelCase : Dict = index + 1
elif index + 1 == len(_lowerCAmelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
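
# Hedged usage sketch (added; not in the original sample):
# >>> split("apple#banana#cherry", "#")
# ['apple', 'banana', 'cherry']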
| 238 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f'{num}/{den}')
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    """Returns the denominator, in lowest terms, of the product of the
    digit-cancelling fractions (Project Euler problem 33)."""
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
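
# Hedged note (added; not in the original sample): the four non-trivial
# digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98; their product is
# 1/100, so solution() returns 100.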
| 238 | 1 |
from __future__ import annotations
class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # The tree shape below is a plausible reconstruction for the nine nodes
    # created in the original sample.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
if __name__ == "__main__":
main()
| 702 |
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 676 | 0 |
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
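
# Hedged usage sketch (added; not in the original sample):
# >>> euclidean_gcd(48, 18)
# 6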
| 487 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(checkpoint_path, hf_config_path='facebook/mbart-large-en-ro', finetuned=False, mbart_50=False):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'

    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
)
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--hf_config',
default='facebook/mbart-large-cc25',
type=str,
help='Which huggingface architecture to use: mbart-large',
)
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is an mBART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 487 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f'_{idx}'

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f'_{idx}'

        logger.info(f'{len(controlnets)} controlnets loaded from {pretrained_model_path}.')

        if len(controlnets) == 0:
            raise ValueError(
                f'No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + "_0"}.'
            )

        return cls(controlnets)
| 162 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)
def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
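
# Hedged usage sketch (added; not in the original sample): for a silicon p-n
# junction with N_D = N_A = 1e17 cm^-3 and n_i = 1e10 cm^-3 at 300 K,
# builtin_voltage(1e17, 1e17, 1e10) is roughly 0.83 V.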
| 162 | 1 |