import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return a list of all primes strictly below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Project Euler 234: sum all semidivisible numbers not exceeding the limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
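
# A quick, hedged sanity check (editor's addition, not part of the original
# solution): prime_sieve(n) returns every prime strictly below n.
assert prime_sieve(10) == [2, 3, 5, 7]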


# ---------------------------- next source file ----------------------------

import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b)] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX Japanese, built on a sub-word vocabulary with emoji support."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    """Sub-word tokenizer aware of Japanese character variants, emoji, and byte fallback."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter5 = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatter6 = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checku2e(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checku2e(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
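
# Minimal usage sketch (editor's addition; assumes the "abeja/gpt-neox-japanese-2.7b"
# checkpoint referenced above is available):
#
#     from transformers import GPTNeoXJapaneseTokenizer
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは、世界。")["input_ids"]
#     print(tokenizer.decode(ids))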


# ---------------------------- next source file ----------------------------

from __future__ import annotations

from bisect import bisect_left
from functools import total_ordering
from heapq import merge


@total_ordering
class Stack(list):
    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def patience_sort(collection: list) -> list:
    """A pure implementation of patience sort in Python."""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)

    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
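
# Non-interactive example (editor's addition): patience_sort sorts in place and
# also returns the collection.
#
#     >>> patience_sort([1, 9, 5, 21, 17, 6])
#     [1, 5, 6, 9, 17, 21]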


# ---------------------------- next source file ----------------------------

import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launches a training function from a notebook, using several processes if needed."""
    # Are we in a Google Colab or a Kaggle Kernel?
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    """Launches a training function using several processes on CPU for debugging purposes."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
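
# Hedged usage sketch (editor's addition): in a notebook one defines a training
# function and hands it to the launcher. The function name and argument below
# are illustrative, not part of this module.
#
#     def training_loop(mixed_precision="fp16"):
#         ...  # build the Accelerator, model and dataloaders here
#
#     notebook_launcher(training_loop, args=("fp16",), num_processes=2)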


# ---------------------------- next source file ----------------------------

from __future__ import annotations

import unittest

from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow


if is_tf_available():
    from transformers import (
        AutoConfig,
        BertConfig,
        GPT2Config,
        T5Config,
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
    )
    from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST

if is_torch_available():
    from transformers import (
        AutoModel,
        AutoModelForCausalLM,
        AutoModelForMaskedLM,
        AutoModelForPreTraining,
        AutoModelForQuestionAnswering,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelWithLMHead,
        BertForMaskedLM,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        BertModel,
        GPT2LMHeadModel,
        RobertaForMaskedLM,
        T5ForConditionalGeneration,
    )


@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)

            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

            model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, T5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
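
# Editor's note: every test above round-trips a checkpoint across frameworks,
# loading PyTorch weights into the TF auto classes (from_pt=True) and TF
# weights into the PyTorch auto classes (from_tf=True), then asserts that each
# auto class resolves to the matching architecture-specific class.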


# ---------------------------- next source file ----------------------------

from typing import TYPE_CHECKING

from ....utils import _LazyModule


_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
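
# Editor's note on the lazy indirection: importing this package is cheap; the
# submodule is only executed on first attribute access, e.g. (import path
# assumed from the package layout)
#
#     from transformers.models.deprecated.tapex import TapexTokenizer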


# ---------------------------- next source file ----------------------------

from collections.abc import Callable


class Heap:
    """A generic Heap class, usable as a min- or max-heap by passing a key function."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for swapping two elements in the heap."""
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns index of valid parent as per desired ordering among given index and
        both its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in upward direction of given index."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in downward direction of given index."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates given item value in heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes given item from heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts given item with given value in heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self):
        """Returns top item pair [item, calculated value] from heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self):
        """Returns top item pair [item, calculated value] from heap and removes it as well if present."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple


def test_heap() -> None:
    """Entry point for the doctests; see the usage sketch below."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
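
# Small usage sketch (editor's addition): with the default key the heap orders
# by the raw value and behaves as a max-heap; pass key=lambda x: -x for
# min-heap behaviour.
#
#     h = Heap()
#     h.insert_item(5, 34)
#     h.insert_item(6, 31)
#     h.insert_item(7, 37)
#     h.get_top()        # [7, 37]
#     h.extract_top()    # [7, 37]
#     h.get_top()        # [5, 34]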


# ---------------------------- next source file ----------------------------

import json
import os
import unittest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)

                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark):w
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',    # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',      # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]

                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)

                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f' {text}'

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name,
                    use_fast=True,
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')

        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
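
# Editor's note: these cases run through the shared TokenizerTesterMixin; a
# typical invocation (test path assumed from the transformers repo layout) is
#
#     python -m pytest tests/models/clip/test_tokenization_clip.py -k full_tokenizer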


# ---------------------------- next source file ----------------------------

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional

from packaging import version


if TYPE_CHECKING:
    from ... import PreTrainedTokenizer, TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging


logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}


class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: Optional[List[PatchingSpec]] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
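
# Hedged usage sketch (editor's addition): wiring the two classes together for
# an export that carries past key values. BloomConfig() uses the small defaults
# defined above.
#
#     config = BloomConfig()
#     onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
#     print(onnx_config.inputs)  # input_ids, past_key_values.*, attention_mask axes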


# ---------------------------- next source file ----------------------------

from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
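
# Typical invocations (editor's note; subcommand names follow the registered
# commands above):
#
#     transformers-cli env
#     transformers-cli convert --help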


# ---------------------------- next source file ----------------------------

import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_string = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_string).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
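
# Editor's note: the offset assertions in PegasusTokenizationTest encode the
# tokenizer's id layout: ids 2..104 (an `offset` of 103 after <pad>=0 and
# </s>=1) are reserved for mask and extra special tokens, so unk lands at
# offset + 2 == 105; BigBirdPegasusTokenizationTest instead builds the
# tokenizer with offset=0 and a plain "[MASK]" token.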


# ---------------------------- next source file ----------------------------

import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
UpperCamelCase__ : Dict = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
UpperCamelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
def __UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'https://pypi.org/pypi/diffusers/json'
SCREAMING_SNAKE_CASE_ : Optional[int] = json.loads(request.urlopen(lowerCamelCase_ ).read() )['releases'].keys()
return sorted(lowerCamelCase_ , key=lambda lowerCamelCase_ : version.Version(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> Tuple:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Path(lowerCamelCase_ ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] ) -> Any:
"""simple docstring"""
init_hf_modules()
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : List[Any] = f.read()
# Imports of the form `import .xxx`
    SCREAMING_SNAKE_CASE_ : Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Unique-ify
return list(set(lowerCamelCase_ ) )
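# Self-contained illustration of what the two patterns above capture
# (the sample module text is made up):
#
#   >>> import re
#   >>> sample = "import .utils\nfrom .pipeline_utils import DiffusionPipeline\n"
#   >>> re.findall(r'^\s*import\s+\.(\S+)\s*$', sample, flags=re.MULTILINE)
#   ['utils']
#   >>> re.findall(r'^\s*from\s+\.(\S+)\s+import', sample, flags=re.MULTILINE)
#   ['pipeline_utils']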
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [module_file]
SCREAMING_SNAKE_CASE_ : Tuple = []
# Let's recurse through all relative imports
while not no_change:
SCREAMING_SNAKE_CASE_ : int = []
for f in files_to_check:
new_imports.extend(get_relative_imports(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : int = Path(lowerCamelCase_ ).parent
SCREAMING_SNAKE_CASE_ : int = [str(module_path / m ) for m in new_imports]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [f for f in new_import_files if f not in all_relative_imports]
SCREAMING_SNAKE_CASE_ : Any = [F'{f}.py' for f in new_import_files]
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCamelCase_ ) == 0
all_relative_imports.extend(lowerCamelCase_ )
return all_relative_imports
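# The loop above is a fixed-point iteration: it keeps expanding relative imports until a
# pass discovers no new files, i.e. it returns the transitive closure. For example (file
# names illustrative), if my_pipeline.py imports .utils and utils.py imports .constants,
# the result contains both utils.py and constants.py.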
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
with open(lowerCamelCase_ , 'r' , encoding='utf-8' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = f.read()
# Imports of the form `import xxx`
    SCREAMING_SNAKE_CASE_ : List[str] = re.findall(r'^\s*import\s+(\S+)\s*$' , lowerCamelCase_ , flags=re.MULTILINE )
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r'^\s*from\s+(\S+)\s+import' , lowerCamelCase_ , flags=re.MULTILINE )
# Only keep the top-level module
SCREAMING_SNAKE_CASE_ : List[str] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(set(lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE_ : List[str] = []
for imp in imports:
try:
importlib.import_module(lowerCamelCase_ )
except ImportError:
missing_packages.append(lowerCamelCase_ )
if len(lowerCamelCase_ ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(lowerCamelCase_ )}. Run `pip install {" ".join(lowerCamelCase_ )}`' )
return get_relative_imports(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = module_path.replace(os.path.sep , '.' )
SCREAMING_SNAKE_CASE_ : Any = importlib.import_module(lowerCamelCase_ )
if class_name is None:
return find_pipeline_class(lowerCamelCase_ )
return getattr(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
SCREAMING_SNAKE_CASE_ : List[Any] = dict(inspect.getmembers(lowerCamelCase_ , inspect.isclass ) )
SCREAMING_SNAKE_CASE_ : List[str] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , lowerCamelCase_ )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
SCREAMING_SNAKE_CASE_ : Any = cls
return pipeline_class
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = module_file_or_url
SCREAMING_SNAKE_CASE_ : Dict = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
SCREAMING_SNAKE_CASE_ : List[str] = get_diffusers_versions()
# cut ".dev0"
SCREAMING_SNAKE_CASE_ : Dict = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
SCREAMING_SNAKE_CASE_ : List[Any] = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
SCREAMING_SNAKE_CASE_ : int = F'v{revision}'
elif revision == "main":
SCREAMING_SNAKE_CASE_ : List[Any] = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
SCREAMING_SNAKE_CASE_ : Tuple = COMMUNITY_PIPELINES_URL.format(revision=lowerCamelCase_ , pipeline=lowerCamelCase_ )
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = cached_download(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Tuple = 'git'
SCREAMING_SNAKE_CASE_ : Dict = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
SCREAMING_SNAKE_CASE_ : List[str] = hf_hub_download(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , local_files_only=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
SCREAMING_SNAKE_CASE_ : Dict = check_imports(lowerCamelCase_ )
# Now we move the module inside our cached dynamic modules.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Path(lowerCamelCase_ ) / full_submodule
if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash each file and name the copy after that hash,
        # so that we only re-copy on modification, but that seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
for module_needed in modules_needed:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F'{module_needed}.py'
shutil.copy(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = use_auth_token
elif use_auth_token is True:
SCREAMING_SNAKE_CASE_ : int = HfFolder.get_token()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
SCREAMING_SNAKE_CASE_ : List[Any] = model_info(lowerCamelCase_ , revision=lowerCamelCase_ , token=lowerCamelCase_ ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
SCREAMING_SNAKE_CASE_ : Any = submodule_path / commit_hash
SCREAMING_SNAKE_CASE_ : List[Any] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(lowerCamelCase_ )
if not (submodule_path / module_file).exists():
shutil.copy(lowerCamelCase_ , submodule_path / module_file )
        # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
lowerCamelCase_ , F'{module_needed}.py' , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return os.path.join(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, os.PathLike] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[Union[str, os.PathLike]] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[Dict[str, str]] = None , lowerCamelCase_ : Optional[Union[bool, str]] = None , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : bool = False , **lowerCamelCase_ : Dict , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_cached_module_file(
lowerCamelCase_ , lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , resume_download=lowerCamelCase_ , proxies=lowerCamelCase_ , use_auth_token=lowerCamelCase_ , revision=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
return get_class_in_module(lowerCamelCase_ , final_module.replace('.py' , '' ) )
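# Minimal usage sketch for the loader above (it mirrors diffusers'
# get_class_from_dynamic_module; the repo id and file name are hypothetical):
#
#   pipeline_cls = get_class_from_dynamic_module(
#       "some-user/some-community-pipeline",  # hypothetical Hub repo
#       "pipeline.py",                        # hypothetical module file
#       class_name=None,                      # auto-detect the DiffusionPipeline subclass
#   )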
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : int=() , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[int]="no" , lowerCamelCase_ : Optional[Any]="29500" ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
SCREAMING_SNAKE_CASE_ : str = True
elif "IPython" in sys.modules:
SCREAMING_SNAKE_CASE_ : Dict = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
try:
SCREAMING_SNAKE_CASE_ : Optional[int] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.' )
if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , lowerCamelCase_ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if num_processes is None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 8
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='TPU' )
print(F'Launching a training on {num_processes} TPU cores.' )
xmp.spawn(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
function(*lowerCamelCase_ )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port=lowerCamelCase_ , mixed_precision=lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : int = PrepareForLaunch(lowerCamelCase_ , distributed_type='MULTI_GPU' )
print(F'Launching training on {num_processes} GPUs.' )
try:
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
SCREAMING_SNAKE_CASE_ : Optional[Any] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
function(*lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Tuple=() , lowerCamelCase_ : str=2 ) -> Union[str, Any]:
"""simple docstring"""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=lowerCamelCase_ , master_addr='127.0.0.1' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
SCREAMING_SNAKE_CASE_ : Tuple = PrepareForLaunch(lowerCamelCase_ , debug=lowerCamelCase_ )
start_processes(lowerCamelCase_ , args=lowerCamelCase_ , nprocs=lowerCamelCase_ , start_method='fork' )
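# Minimal usage sketch (the launcher defined above mirrors accelerate's
# notebook_launcher; the training function is hypothetical):
#
#   def training_loop():
#       from accelerate import Accelerator
#       accelerator = Accelerator()
#       ...  # build the model/dataloaders and train here
#
#   notebook_launcher(training_loop, args=(), num_processes=2, mixed_precision="no")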
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "visual_bert"
def __init__( self ,snake_case__=30522 ,snake_case__=768 ,snake_case__=512 ,snake_case__=12 ,snake_case__=12 ,snake_case__=3072 ,snake_case__="gelu" ,snake_case__=0.1 ,snake_case__=0.1 ,snake_case__=512 ,snake_case__=2 ,snake_case__=0.02 ,snake_case__=1E-12 ,snake_case__=False ,snake_case__=True ,snake_case__=1 ,snake_case__=0 ,snake_case__=2 ,**snake_case__ ,):
super().__init__(pad_token_id=snake_case__ ,bos_token_id=snake_case__ ,eos_token_id=snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = vocab_size
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : str = hidden_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE_ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE_ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE_ : Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE_ : int = bypass_transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = special_visual_initialize
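# Minimal instantiation sketch (this mirrors transformers' VisualBertConfig;
# the override values are illustrative):
#
#   config = VisualBertConfig(visual_embedding_dim=512, num_hidden_layers=6)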
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
UpperCamelCase__ : Dict = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''adapter_layer''': '''encoder.layers.*.adapter_layer''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
'''pooling_layer.linear''': '''projector''',
'''pooling_layer.projection''': '''classifier''',
}
UpperCamelCase__ : List[str] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''projector''',
'''classifier''',
]
def __UpperCAmelCase ( lowerCamelCase_ : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = {}
with open(lowerCamelCase_ , 'r' ) as file:
for line_number, line in enumerate(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = line.strip()
if line:
SCREAMING_SNAKE_CASE_ : List[Any] = line.split()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = line_number
SCREAMING_SNAKE_CASE_ : Optional[int] = words[0]
SCREAMING_SNAKE_CASE_ : List[str] = value
return result
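# Illustrative input/output for the parser above (assumed format: one label per line,
# keeping the first whitespace-separated token, keyed by line number):
#
#   labels.txt containing "happy\nsad\n"  ->  {0: 'happy', 1: 'sad'}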
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Dict , lowerCamelCase_ : Any , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple ) -> int:
"""simple docstring"""
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE_ : Dict = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : str = PARAM_MAPPING[full_name.split('.' )[-1]]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'param'
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE_ : Tuple = getattr(lowerCamelCase_ , lowerCamelCase_ ).shape
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE_ : Any = hf_pointer
for attribute in hf_param_name.split('.' ):
SCREAMING_SNAKE_CASE_ : List[str] = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = shape_pointer.shape
# let's reduce dimension
SCREAMING_SNAKE_CASE_ : Tuple = value[0]
else:
SCREAMING_SNAKE_CASE_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}' )
if weight_type == "weight":
SCREAMING_SNAKE_CASE_ : int = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE_ : Dict = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE_ : str = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE_ : Tuple = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = getattr(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = value
else:
SCREAMING_SNAKE_CASE_ : List[str] = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = PARAM_MAPPING[full_name.split('.' )[-1]]
SCREAMING_SNAKE_CASE_ : Tuple = 'param'
if weight_type is not None and weight_type != "param":
SCREAMING_SNAKE_CASE_ : int = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
SCREAMING_SNAKE_CASE_ : str = '.'.join([key, hf_param_name] )
else:
SCREAMING_SNAKE_CASE_ : List[str] = key
SCREAMING_SNAKE_CASE_ : Optional[Any] = value if 'lm_head' in full_key else value[0]
UpperCamelCase__ : Optional[Any] = {
'''W_a''': '''linear_1.weight''',
'''W_b''': '''linear_2.weight''',
'''b_a''': '''linear_1.bias''',
'''b_b''': '''linear_2.bias''',
'''ln_W''': '''norm.weight''',
'''ln_b''': '''norm.bias''',
}
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Optional[Any]=None ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE_ : List[Any] = name.split(lowerCamelCase_ )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE_ : str = mapped_key.replace('*' , lowerCamelCase_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE_ : str = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'weight'
else:
SCREAMING_SNAKE_CASE_ : Dict = None
if hf_dict is not None:
rename_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
else:
set_recursively(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return is_used
return is_used
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : List[Any] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE_ : Any = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE_ : List[str] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE_ : Tuple = load_wavaveca_layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
if not is_used:
unused_weights.append(lowerCamelCase_ )
logger.warning(F'Unused weights: {unused_weights}' )
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE_ : List[Any] = name.split('.' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = int(items[0] )
SCREAMING_SNAKE_CASE_ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
SCREAMING_SNAKE_CASE_ : List[Any] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
SCREAMING_SNAKE_CASE_ : int = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
SCREAMING_SNAKE_CASE_ : int = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
SCREAMING_SNAKE_CASE_ : List[Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase_ )
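# Name-parsing sketch for the function above (fairseq-style names, illustrative):
#   "conv_layers.0.0.weight" -> layer_id=0, type_id=0 (the conv weight/bias branch)
#   "conv_layers.0.2.bias"   -> layer_id=0, type_id=2 (the layer/group-norm branch)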
@torch.no_grad()
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : str=None , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : str=False ) -> Tuple:
"""simple docstring"""
if config_path is not None:
SCREAMING_SNAKE_CASE_ : str = WavaVecaConfig.from_pretrained(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaConfig()
if is_seq_class:
SCREAMING_SNAKE_CASE_ : Tuple = read_txt_into_dict(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = idalabel
SCREAMING_SNAKE_CASE_ : Optional[int] = WavaVecaForSequenceClassification(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
feature_extractor.save_pretrained(lowerCamelCase_ )
elif is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE_ : Optional[Any] = Dictionary.load(lowerCamelCase_ )
            # important: change the bos & pad token ids, since the CTC blank symbol is <pad>
            # and not <s> as in fairseq
SCREAMING_SNAKE_CASE_ : Optional[Any] = target_dict.pad_index
SCREAMING_SNAKE_CASE_ : List[str] = target_dict.bos_index
SCREAMING_SNAKE_CASE_ : Optional[int] = target_dict.eos_index
SCREAMING_SNAKE_CASE_ : List[str] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(lowerCamelCase_ , 'vocab.json' )
if not os.path.isdir(lowerCamelCase_ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase_ ) )
return
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : int = 1
with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaCTCTokenizer(
lowerCamelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Optional[int] = True if config.feat_extract_norm == 'layer' else False
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = WavaVecaProcessor(feature_extractor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = WavaVecaForCTC(lowerCamelCase_ )
else:
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaForPreTraining(lowerCamelCase_ )
if is_finetuned or is_seq_class:
SCREAMING_SNAKE_CASE_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.Namespace(task='audio_pretraining' )
SCREAMING_SNAKE_CASE_ : Any = fairseq.tasks.setup_task(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = model[0].eval()
recursively_load_weights(lowerCamelCase_ , lowerCamelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
parser.add_argument(
'''--is_seq_class''',
action='''store_true''',
help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
)
UpperCamelCase__ : Dict = parser.parse_args()
UpperCamelCase__ : Union[str, Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
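# Example invocation (the script name and paths are placeholders):
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned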
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Union[str, Any]:
"""simple docstring"""
def is_in_circle(lowerCamelCase_ : float , lowerCamelCase_ : float ) -> bool:
SCREAMING_SNAKE_CASE_ : Any = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
SCREAMING_SNAKE_CASE_ : Optional[int] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCamelCase_ ) )
# The ratio of the area for circle to square is pi/4.
SCREAMING_SNAKE_CASE_ : Tuple = proportion * 4
print(F'The estimated value of pi is {pi_estimate}' )
print(F'The numpy value of pi is {pi}' )
print(F'The total error is {abs(pi - pi_estimate )}' )
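# Why the factor of 4: points are drawn uniformly from the square [-1, 1] x [-1, 1]
# (area 4), and the unit disk (area pi) is hit with probability pi / 4, so pi is
# approximated by 4 * (hits / samples). With 10**6 samples the absolute error is
# typically on the order of 1e-3 (illustrative figure, not a guarantee).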
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Callable[[float], float] , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 , ) -> float:
"""simple docstring"""
return mean(
function_to_integrate(uniform(lowerCamelCase_ , lowerCamelCase_ ) ) for _ in range(lowerCamelCase_ ) ) * (max_value - min_value)
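# This is mean-value Monte Carlo integration:
#   integral from a to b of f(x) dx  ~=  (b - a) * mean(f(U_i)),  U_i ~ Uniform(a, b)
# which is exactly what the estimator above returns with a = min_value, b = max_value.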
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : float = 0.0 , lowerCamelCase_ : float = 1.0 ) -> None:
"""simple docstring"""
def identity_function(lowerCamelCase_ : float ) -> float:
return x
SCREAMING_SNAKE_CASE_ : str = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (max_value * max_value - min_value * min_value) / 2
print('******************' )
print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {expected_value}' )
print(F'Total error is {abs(estimated_value - expected_value )}' )
print('******************' )
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> None:
"""simple docstring"""
def function_to_integrate(lowerCamelCase_ : float ) -> float:
return sqrt(4.0 - x * x )
SCREAMING_SNAKE_CASE_ : Dict = area_under_curve_estimator(
lowerCamelCase_ , lowerCamelCase_ , 0.0 , 2.0 )
print('******************' )
print('Estimating pi using area_under_curve_estimator' )
print(F'Estimated value is {estimated_value}' )
print(F'Expected value is {pi}' )
print(F'Total error is {abs(estimated_value - pi )}' )
print('******************' )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCamelCase__ : List[Any] = 20_48
UpperCamelCase__ : Optional[Any] = 40_96
UpperCamelCase__ : Dict = 42
UpperCamelCase__ : Any = os.environ.pop('''PROCESS_TRAIN''', '''false''')
UpperCamelCase__ : str = {'''null''': 0, '''short''': 1, '''long''': 2, '''yes''': 3, '''no''': 4}
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Optional[int]:
"""simple docstring"""
def choose_first(lowerCamelCase_ : Any , lowerCamelCase_ : Any=False ):
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
if len(lowerCamelCase_ ) == 1:
SCREAMING_SNAKE_CASE_ : int = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
SCREAMING_SNAKE_CASE_ : int = {k: [a[k]] for k in a}
if len(a['start_token'] ) > 0:
break
return a
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {'id': example['id']}
SCREAMING_SNAKE_CASE_ : int = example['annotations']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = annotation['yes_no_answer']
if 0 in yes_no_answer or 1 in yes_no_answer:
SCREAMING_SNAKE_CASE_ : int = ['yes'] if 1 in yes_no_answer else ['no']
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : str = ['<cls>']
else:
SCREAMING_SNAKE_CASE_ : str = ['short']
SCREAMING_SNAKE_CASE_ : List[str] = choose_first(annotation['short_answers'] )
if len(out['start_token'] ) == 0:
# answer will be long if short is not available
SCREAMING_SNAKE_CASE_ : str = ['long']
SCREAMING_SNAKE_CASE_ : List[str] = choose_first(annotation['long_answer'] , is_long_answer=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Any = []
answer.update(lowerCamelCase_ )
# disregard some samples
if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Tuple = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
if not all(isinstance(answer[k] , lowerCamelCase_ ) for k in cols ):
raise ValueError('Issue in ID' , example['id'] )
return answer
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str=False ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = _get_single_answer(lowerCamelCase_ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
SCREAMING_SNAKE_CASE_ : Optional[Any] = example['document']['tokens']
SCREAMING_SNAKE_CASE_ : int = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(lowerCamelCase_ ),
"answer": {
"start_token": -1_00, # ignore index in cross-entropy
"end_token": -1_00, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
    # sentinel values used later to drop all the no-answer samples
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
SCREAMING_SNAKE_CASE_ : Union[str, Any] = example['document']['tokens']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = answer['start_token']
SCREAMING_SNAKE_CASE_ : str = answer['end_token']
SCREAMING_SNAKE_CASE_ : Any = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
SCREAMING_SNAKE_CASE_ : Tuple = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
SCREAMING_SNAKE_CASE_ : Tuple = doc['is_html'][answer['start_token'] : answer['end_token']]
SCREAMING_SNAKE_CASE_ : str = doc['token'][answer['start_token'] : answer['end_token']]
SCREAMING_SNAKE_CASE_ : Optional[int] = ' '.join([old[i] for i in range(len(lowerCamelCase_ ) ) if not is_html[i]] )
if new != old:
print('ID:' , example['id'] )
print('New:' , lowerCamelCase_ , end='\n' )
print('Old:' , lowerCamelCase_ , end='\n\n' )
return {
"context": " ".join(lowerCamelCase_ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
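# Offset-adjustment sketch for the HTML-stripping loop above (made-up tokens):
#   doc tokens ["<p>", "hello", "world"] with is_html [True, False, False] and an
#   answer span of (1, 3) become context "hello world" with span (0, 2).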
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[Any]=20_48 , lowerCamelCase_ : Dict=40_96 , lowerCamelCase_ : int=True ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = get_context_and_ans(lowerCamelCase_ , assertion=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = out['answer']
    # sentinel values; these samples are removed later
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer(example['question']['text'] , out['context'] ).input_ids
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
SCREAMING_SNAKE_CASE_ : Dict = []
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : str = input_ids[:q_len]
SCREAMING_SNAKE_CASE_ : Optional[int] = range(lowerCamelCase_ , len(lowerCamelCase_ ) , max_length - doc_stride )
for i in doc_start_indices:
SCREAMING_SNAKE_CASE_ : Optional[Any] = i + max_length - q_len
SCREAMING_SNAKE_CASE_ : Tuple = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['category'][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_00] * len(lowerCamelCase_ ),
"end_token": [-1_00] * len(lowerCamelCase_ ),
"category": category,
},
}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = out['context'].split()
SCREAMING_SNAKE_CASE_ : List[Any] = splitted_context[answer['end_token']]
SCREAMING_SNAKE_CASE_ : List[Any] = len(
tokenizer(
' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=lowerCamelCase_ , ).input_ids )
SCREAMING_SNAKE_CASE_ : Any = len(
tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=lowerCamelCase_ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
SCREAMING_SNAKE_CASE_ : int = len(tokenizer(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
SCREAMING_SNAKE_CASE_ : Any = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive
SCREAMING_SNAKE_CASE_ : int = answer['start_token']
SCREAMING_SNAKE_CASE_ : Optional[int] = answer['end_token']
if assertion:
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.decode(lowerCamelCase_ )
if answer["span"] != new:
print('ISSUE IN TOKENIZATION' )
print('OLD:' , answer['span'] )
print('NEW:' , lowerCamelCase_ , end='\n\n' )
if len(lowerCamelCase_ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
SCREAMING_SNAKE_CASE_ : Tuple = input_ids[:q_len]
SCREAMING_SNAKE_CASE_ : List[Any] = range(lowerCamelCase_ , len(lowerCamelCase_ ) , max_length - doc_stride )
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : Optional[int] = [] # null, yes, no, long, short
for i in doc_start_indices:
SCREAMING_SNAKE_CASE_ : int = i + max_length - q_len
SCREAMING_SNAKE_CASE_ : List[Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
SCREAMING_SNAKE_CASE_ : Optional[int] = start_token - i + q_len
SCREAMING_SNAKE_CASE_ : List[Any] = end_token - i + q_len
answers_category.append(answer['category'][0] ) # ["short"] -> "short"
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = -1_00
SCREAMING_SNAKE_CASE_ : Union[str, Any] = -1_00
answers_category.append('null' )
SCREAMING_SNAKE_CASE_ : Any = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowerCamelCase_ )
answers_end_token.append(lowerCamelCase_ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('ISSUE in strided for ID:' , example['id'] )
print('New:' , tokenizer.decode(lowerCamelCase_ ) )
print('Old:' , tokenizer.decode(lowerCamelCase_ ) , end='\n\n' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
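# Striding sketch (illustrative numbers): with q_len = 10, max_length = 4096 and
# doc_stride = 2048, window starts are 10, 2058, 4106, ... (step = max_length - doc_stride);
# each window holds the 10 question tokens plus 4086 context tokens, so consecutive
# windows overlap by 2038 context tokens.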
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any]=20_48 , lowerCamelCase_ : Optional[Any]=40_96 , lowerCamelCase_ : Optional[Any]=False ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = get_strided_contexts_and_ans(
lowerCamelCase_ , lowerCamelCase_ , doc_stride=lowerCamelCase_ , max_length=lowerCamelCase_ , assertion=lowerCamelCase_ , )
return example
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : List[str] ) -> Any:
"""simple docstring"""
with jsonlines.open(lowerCamelCase_ , 'a' ) as writer:
for example in tqdm(lowerCamelCase_ , total=len(lowerCamelCase_ ) , desc='Saving samples ... ' ):
SCREAMING_SNAKE_CASE_ : Any = example['labels']
for ids, start, end, cat in zip(
example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
if start == -1 and end == -1:
                    continue # skip samples that have no answer
if cat == "null" and np.random.rand() < 0.6:
                    continue # randomly drop ~60 % of the null samples
writer.write(
{
'input_ids': ids,
'start_token': start,
'end_token': end,
'category': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCamelCase__ : Optional[int] = load_dataset('''natural_questions''')
UpperCamelCase__ : List[Any] = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
UpperCamelCase__ : int = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
UpperCamelCase__ : Any = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
UpperCamelCase__ : Any = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCamelCase__ : List[str] = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
UpperCamelCase__ : int = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
def __init__( self ,snake_case__ ,snake_case__=7 ,snake_case__=3 ,snake_case__=18 ,snake_case__=30 ,snake_case__=400 ,snake_case__=True ,snake_case__=None ,snake_case__=True ,):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 18, 'width': 18}
SCREAMING_SNAKE_CASE_ : str = parent
SCREAMING_SNAKE_CASE_ : List[str] = batch_size
SCREAMING_SNAKE_CASE_ : Tuple = num_channels
SCREAMING_SNAKE_CASE_ : Dict = image_size
SCREAMING_SNAKE_CASE_ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE_ : int = max_resolution
SCREAMING_SNAKE_CASE_ : Dict = do_resize
SCREAMING_SNAKE_CASE_ : Dict = size
SCREAMING_SNAKE_CASE_ : str = apply_ocr
def snake_case ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Dict = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = LayoutLMvaImageProcessingTester(self )
@property
def snake_case ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ ,'do_resize' ) )
self.assertTrue(hasattr(snake_case__ ,'size' ) )
self.assertTrue(hasattr(snake_case__ ,'apply_ocr' ) )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
SCREAMING_SNAKE_CASE_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def snake_case ( self ):
pass
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
self.assertIsInstance(encoding.words ,snake_case__ )
self.assertIsInstance(encoding.boxes ,snake_case__ )
# Test batched
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# Initialize image_processing
SCREAMING_SNAKE_CASE_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=snake_case__ ,torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ ,torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
# Test batched
SCREAMING_SNAKE_CASE_ : List[Any] = image_processing(snake_case__ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) ,)
def snake_case ( self ):
# with apply_OCR = True
SCREAMING_SNAKE_CASE_ : Tuple = LayoutLMvaImageProcessor()
from datasets import load_dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_docvqa' ,split='test' )
SCREAMING_SNAKE_CASE_ : str = Image.open(ds[0]['file'] ).convert('RGB' )
SCREAMING_SNAKE_CASE_ : Any = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE_ : Any = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
SCREAMING_SNAKE_CASE_ : Any = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,snake_case__ )
self.assertListEqual(encoding.boxes ,snake_case__ )
        # with apply_ocr = False
SCREAMING_SNAKE_CASE_ : Optional[int] = LayoutLMvaImageProcessor(apply_ocr=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(snake_case__ ,return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 685 | 0 |
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> Tuple:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
else:
return a * actual_power(lowerCamelCase_ , int(b / 2 ) ) * actual_power(lowerCamelCase_ , int(b / 2 ) )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowerCamelCase_ , lowerCamelCase_ )
return actual_power(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
print(power(-2, -3))
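# Added commentary (not in the original snippet): each branch above evaluates the
# half-power recursion twice instead of reusing its result, so it performs O(b)
# multiplications rather than the O(log b) of textbook fast exponentiation.
# A minimal sketch of the cached variant, assuming plain numeric inputs:
def fast_power(base: float, exp: int) -> float:
    """Exponentiation by squaring in O(log |exp|) multiplications."""
    if exp < 0:
        return 1 / fast_power(base, -exp)
    if exp == 0:
        return 1
    half = fast_power(base, exp // 2)  # compute the half power once, reuse it twice
    return half * half if exp % 2 == 0 else base * half * half
# e.g. fast_power(-2, -3) == -0.125, matching power(-2, -3) above.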
| 704 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCamelCase__ : str = logging.getLogger(__name__)
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : str
__a : str
__a : Optional[str] = None
__a : Optional[str] = None
__a : Optional[str] = None
@dataclass(frozen=lowerCamelCase_ )
class lowerCAmelCase_ :
__a : List[int]
__a : Optional[List[int]] = None
__a : Optional[List[int]] = None
__a : Optional[Union[int, float]] = None
__a : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = None ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[Any] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
snake_case__ ,'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' ,tokenizer.__class__.__name__ ,str(snake_case__ ) ,snake_case__ ,) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE_ : Dict = cached_features_file + '.lock'
with FileLock(snake_case__ ):
if os.path.exists(snake_case__ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.load(snake_case__ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
SCREAMING_SNAKE_CASE_ : List[Any] = (
processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
)
logger.info('Training examples: %s' ,len(snake_case__ ) )
SCREAMING_SNAKE_CASE_ : List[str] = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
logger.info('Saving features into cached file %s' ,snake_case__ )
torch.save(self.features ,snake_case__ )
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
__a : List[InputFeatures]
def __init__( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ = 128 ,snake_case__=False ,snake_case__ = False ,):
SCREAMING_SNAKE_CASE_ : Optional[int] = hans_processors[task]()
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list[2], label_list[1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = label_list
SCREAMING_SNAKE_CASE_ : int = processor.get_dev_examples(snake_case__ ) if evaluate else processor.get_train_examples(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = hans_convert_examples_to_features(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(snake_case__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
SCREAMING_SNAKE_CASE_ : List[Any] = tf.data.Dataset.from_generator(
snake_case__ ,(
{
'example_id': tf.intaa,
'input_ids': tf.intaa,
'attention_mask': tf.intaa,
'token_type_ids': tf.intaa,
},
tf.intaa,
) ,(
{
'example_id': tf.TensorShape([] ),
'input_ids': tf.TensorShape([None, None] ),
'attention_mask': tf.TensorShape([None, None] ),
'token_type_ids': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self ,snake_case__ ):
return self.features[i]
def snake_case ( self ):
return self.label_list
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_train_set.txt' ) ) ,'train' )
def snake_case ( self ,snake_case__ ):
return self._create_examples(self._read_tsv(os.path.join(snake_case__ ,'heuristics_evaluation_set.txt' ) ) ,'dev' )
def snake_case ( self ):
return ["contradiction", "entailment", "neutral"]
def snake_case ( self ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i, line in enumerate(snake_case__ ):
if i == 0:
continue
SCREAMING_SNAKE_CASE_ : List[str] = '%s-%s' % (set_type, line[0])
SCREAMING_SNAKE_CASE_ : Dict = line[5]
SCREAMING_SNAKE_CASE_ : Dict = line[6]
SCREAMING_SNAKE_CASE_ : Tuple = line[7][2:] if line[7].startswith('ex' ) else line[7]
SCREAMING_SNAKE_CASE_ : Optional[int] = line[0]
examples.append(InputExample(guid=snake_case__ ,text_a=snake_case__ ,text_b=snake_case__ ,label=snake_case__ ,pairID=snake_case__ ) )
return examples
def __UpperCAmelCase ( lowerCamelCase_ : List[InputExample] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : PreTrainedTokenizer , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {label: i for i, label in enumerate(lowerCamelCase_ )}
SCREAMING_SNAKE_CASE_ : Dict = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCamelCase_ ) , desc='convert examples to features' ):
if ex_index % 1_00_00 == 0:
logger.info('Writing example %d' % (ex_index) )
SCREAMING_SNAKE_CASE_ : Any = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCamelCase_ , max_length=lowerCamelCase_ , padding='max_length' , truncation=lowerCamelCase_ , return_overflowing_tokens=lowerCamelCase_ , )
SCREAMING_SNAKE_CASE_ : List[Any] = label_map[example.label] if example.label in label_map else 0
SCREAMING_SNAKE_CASE_ : List[str] = int(example.pairID )
features.append(InputFeatures(**lowerCamelCase_ , label=lowerCamelCase_ , pairID=lowerCamelCase_ ) )
for i, example in enumerate(examples[:5] ):
logger.info('*** Example ***' )
logger.info(F'guid: {example}' )
logger.info(F'features: {features[i]}' )
return features
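# Shape note (added commentary, not in the original): because padding='max_length'
# is requested above, every InputFeatures row carries fixed-length sequences, so
# len(features[i].input_ids) equals max_length for all examples.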
UpperCamelCase__ : str = {
'''hans''': 3,
}
UpperCamelCase__ : Dict = {
'''hans''': HansProcessor,
}
| 685 | 0 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Dataset.from_dict(lowerCamelCase_ )
return dataset
class lowerCAmelCase_ ( lowerCamelCase_ ):
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_dataset()
SCREAMING_SNAKE_CASE_ : List[str] = make_duplicate_clusters(snake_case__ ,0.85 )
self.assertEqual(len(duplicate_clusters[0] ) ,2 )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = get_dataset()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = deduplicate_dataset(snake_case__ )
self.assertEqual(len(snake_case__ ) ,2 )
print(snake_case__ )
self.assertEqual(duplicate_clusters[0][0]['copies'] ,2 )
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] ,snake_case__ )
| 705 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 4_00 * 2**20, 6_00 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 1_00 * 2**20, 9_00 * 2**20] )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : List[Any] ) -> int:
"""simple docstring"""
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
SCREAMING_SNAKE_CASE_ : str = dataset_size < in_memory_max_size
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = is_small_dataset(lowerCamelCase_ )
assert result == expected
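# The decision pinned down by the test above, restated as a tiny pure predicate
# (an illustrative sketch, not the library's implementation):
def fits_in_memory(dataset_size, in_memory_max_size) -> bool:
    # True only when both values are known/nonzero and the size fits under the cap
    return bool(dataset_size and in_memory_max_size) and dataset_size < in_memory_max_size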
| 685 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
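    # Why those pieces (added commentary): with merges ['l o', 'lo w</w>', 'e r</w>'],
    # 'lower' fuses 'l o' -> 'lo' and 'e r</w>' -> 'er</w>' but never 'lo w', because
    # the 'lo w</w>' merge applies only to a word-final 'w'; 'newer' likewise keeps
    # ['n', 'e', 'w', 'er</w>'].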
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
            '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
| 706 |
from math import log
from scipy.constants import Boltzmann, physical_constants
UpperCamelCase__ : Any = 3_00 # TEMPERATURE (unit = K)
def __UpperCAmelCase ( lowerCamelCase_ : float , lowerCamelCase_ : float , lowerCamelCase_ : float , ) -> float:
"""simple docstring"""
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
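# Physics note (added commentary): the quantity computed above is the p-n junction
# built-in potential, V_bi = (k_B * T / q) * ln(N_d * N_a / n_i**2); dividing the
# joule-valued product by the electron-volt constant expresses it in volts.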
| 685 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] ) -> List[str]:
"""simple docstring"""
return choice(lowerCamelCase_ )
def __UpperCAmelCase ( lowerCamelCase_ : list[int] , lowerCamelCase_ : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = random_pivot(lowerCamelCase_ )
# partition based on pivot
# linear time
SCREAMING_SNAKE_CASE_ : Dict = [e for e in lst if e < pivot]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(lowerCamelCase_ ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(lowerCamelCase_ ) < k - 1:
return kth_number(lowerCamelCase_ , k - len(lowerCamelCase_ ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(lowerCamelCase_ , lowerCamelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
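# Worked example (illustrative): kth_number([3, 1, 4, 9, 2], 2) returns 2, the
# second-smallest value, for any pivot the random choice lands on. One caveat
# worth flagging: the partition above keeps only a single implicit copy of the
# pivot, so lists containing duplicate values can silently lose elements;
# distinct inputs are assumed.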
| 707 |
class lowerCAmelCase_ ( lowerCamelCase_ ):
pass
class lowerCAmelCase_ ( lowerCamelCase_ ):
pass
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
[],
[],
[],
]
def snake_case ( self ,snake_case__ ,snake_case__ ):
try:
if len(self.queues[priority] ) >= 100:
                raise OverFlowError('Maximum queue size is 100' )
self.queues[priority].append(snake_case__ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def snake_case ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self ):
return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class lowerCAmelCase_ :
def __init__( self ):
SCREAMING_SNAKE_CASE_ : List[str] = []
def snake_case ( self ,snake_case__ ):
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(snake_case__ )
def snake_case ( self ):
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE_ : List[Any] = min(self.queue )
self.queue.remove(snake_case__ )
return data
def __str__( self ):
return str(self.queue )
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowerCamelCase_ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def __UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowerCamelCase_ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
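# Behaviour sketch (added commentary): fixed_priority_queue() drains priority 0
# first and is FIFO within a level, so its first five dequeues print 10, 100,
# 128, 70, 7; element_priority_queue() always removes the current minimum, so
# its first five dequeues print 1, 4, 5, 7, 10.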
| 685 | 0 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase_ :
def __init__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = num_of_nodes
SCREAMING_SNAKE_CASE_ : list[list[int]] = []
SCREAMING_SNAKE_CASE_ : dict[int, int] = {}
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
self.m_edges.append([u_node, v_node, weight] )
def snake_case ( self ,snake_case__ ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def snake_case ( self ,snake_case__ ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.find_component(snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(snake_case__ )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE_ : Dict = self.find_component(snake_case__ )
component_size[u_node] += component_size[v_node]
self.set_component(snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = []
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE_ : Optional[int] = edge
SCREAMING_SNAKE_CASE_ : int = self.m_component[u]
SCREAMING_SNAKE_CASE_ : Tuple = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE_ : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = edge
SCREAMING_SNAKE_CASE_ : Any = self.m_component[u]
SCREAMING_SNAKE_CASE_ : List[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case__ ,snake_case__ ,snake_case__ )
print(F'Added edge [{u} - {v}]\nAdded weight: {w}\n' )
num_of_components -= 1
SCREAMING_SNAKE_CASE_ : str = [-1] * self.m_num_of_nodes
print(F'The total weight of the minimal spanning tree is: {mst_weight}' )
def __UpperCAmelCase ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
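# Usage sketch (illustrative; assumes the original names Graph / add_edge /
# boruvka behind the placeholder identifiers above):
#   g = Graph(3)
#   g.add_edge(0, 1, 5); g.add_edge(1, 2, 1); g.add_edge(0, 2, 3)
#   g.boruvka()  # keeps edges (1-2, w=1) and (0-2, w=3); total MST weight 4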
| 708 |
def __UpperCAmelCase ( lowerCamelCase_ : int = 10_00 ) -> int:
"""simple docstring"""
return sum(e for e in range(3 , lowerCamelCase_ ) if e % 3 == 0 or e % 5 == 0 )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 685 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : str = logging.get_logger()
@dataclass
class lowerCAmelCase_ :
__a : nn.Module
__a : List[nn.Module] = field(default_factory=lowerCamelCase_ )
__a : list = field(default_factory=lowerCamelCase_ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : str = len(list(m.modules() ) ) == 1 or isinstance(snake_case__ ,nn.Convad ) or isinstance(snake_case__ ,nn.BatchNormad )
if has_not_submodules:
self.traced.append(snake_case__ )
def __call__( self ,snake_case__ ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(snake_case__ )
[x.remove() for x in self.handles]
return self
@property
def snake_case ( self ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda snake_case__ : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) )
@dataclass
class lowerCAmelCase_ :
__a : nn.Module
__a : nn.Module
__a : int = 0
__a : List = field(default_factory=lowerCamelCase_ )
__a : List = field(default_factory=lowerCamelCase_ )
def __call__( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Tracker(self.dest )(snake_case__ ).parametrized
SCREAMING_SNAKE_CASE_ : int = Tracker(self.src )(snake_case__ ).parametrized
SCREAMING_SNAKE_CASE_ : Tuple = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.src_skip ,snake_case__ ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(filter(lambda snake_case__ : type(snake_case__ ) not in self.dest_skip ,snake_case__ ) )
if len(snake_case__ ) != len(snake_case__ ):
raise Exception(
F'Numbers of operations are different. Source module has {len(snake_case__ )} operations while'
F' destination module has {len(snake_case__ )}.' )
for dest_m, src_m in zip(snake_case__ ,snake_case__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(F'Transferred from={src_m} to={dest_m}' )
def __UpperCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : ResNetConfig , lowerCamelCase_ : Path , lowerCamelCase_ : bool = True ) -> List[Any]:
"""simple docstring"""
print(F'Converting {name}...' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = timm.create_model(lowerCamelCase_ , pretrained=lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE_ : Tuple = ResNetForImageClassification(lowerCamelCase_ ).eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = ModuleTransfer(src=lowerCamelCase_ , dest=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = torch.randn((1, 3, 2_24, 2_24) )
module_transfer(lowerCamelCase_ )
assert torch.allclose(from_model(lowerCamelCase_ ) , our_model(lowerCamelCase_ ).logits ), "The model logits don't match the original one."
SCREAMING_SNAKE_CASE_ : Tuple = F'resnet{"-".join(name.split("resnet" ) )}'
print(lowerCamelCase_ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
# we can use the convnext one
SCREAMING_SNAKE_CASE_ : Dict = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
print(F'Pushed {checkpoint_name}' )
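# Mechanism note (added commentary): the first dataclass above registers forward
# hooks to record every learnable leaf module touched during a pass, and the
# second copies state dicts between the two recorded traces position by position;
# the transfer therefore assumes both networks execute the same ordered sequence
# of leaf ops.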
def __UpperCAmelCase ( lowerCamelCase_ : Path , lowerCamelCase_ : str = None , lowerCamelCase_ : bool = True ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE_ : Tuple = 10_00
SCREAMING_SNAKE_CASE_ : List[str] = (1, num_labels)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE_ : List[str] = num_labels
SCREAMING_SNAKE_CASE_ : int = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE_ : List[Any] = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : Union[str, Any] = idalabel
SCREAMING_SNAKE_CASE_ : Tuple = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE_ : Optional[int] = partial(lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : int = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(lowerCamelCase_ , names_to_config[model_name] , lowerCamelCase_ , lowerCamelCase_ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase__ : str = parser.parse_args()
UpperCamelCase__ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 709 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Tuple = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[str] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Any = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Union[str, Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : List[Any] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Dict = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : Optional[int] = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
class lowerCAmelCase_ ( metaclass=lowerCamelCase_ ):
__a : str = ["flax"]
def __init__( self ,*snake_case__ ,**snake_case__ ):
requires_backends(self ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
@classmethod
def snake_case ( cls ,*snake_case__ ,**snake_case__ ):
requires_backends(cls ,['flax'] )
| 685 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCamelCase__ : Tuple = None
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ : Optional[Any] = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ : Optional[Any] = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
UpperCamelCase__ : Union[str, Any] = '''▁'''
# Segments (not really needed)
UpperCamelCase__ : Optional[Any] = 0
UpperCamelCase__ : Optional[Any] = 1
UpperCamelCase__ : List[Any] = 2
UpperCamelCase__ : Optional[int] = 3
UpperCamelCase__ : str = 4
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : str = VOCAB_FILES_NAMES
__a : str = PRETRAINED_VOCAB_FILES_MAP
__a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : Optional[Any] = "left"
__a : Tuple = XLNetTokenizer
def __init__( self ,snake_case__=None ,snake_case__=None ,snake_case__=False ,snake_case__=True ,snake_case__=False ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="<unk>" ,snake_case__="<sep>" ,snake_case__="<pad>" ,snake_case__="<cls>" ,snake_case__="<mask>" ,snake_case__=["<eop>", "<eod>"] ,**snake_case__ ,):
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : List[Any] = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else mask_token
super().__init__(
vocab_file=snake_case__ ,tokenizer_file=snake_case__ ,do_lower_case=snake_case__ ,remove_space=snake_case__ ,keep_accents=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,unk_token=snake_case__ ,sep_token=snake_case__ ,pad_token=snake_case__ ,cls_token=snake_case__ ,mask_token=snake_case__ ,additional_special_tokens=snake_case__ ,**snake_case__ ,)
SCREAMING_SNAKE_CASE_ : Optional[int] = 3
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_lower_case
SCREAMING_SNAKE_CASE_ : Optional[Any] = remove_space
SCREAMING_SNAKE_CASE_ : List[Any] = keep_accents
SCREAMING_SNAKE_CASE_ : List[str] = vocab_file
SCREAMING_SNAKE_CASE_ : Tuple = False if not self.vocab_file else True
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : int = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Tuple = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Union[str, Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file ,snake_case__ )
return (out_vocab_file,)
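# Layout note (added commentary): XLNet appends its special tokens at the end,
# so a single sequence encodes as tokens + ['<sep>', '<cls>'] and its token type
# ids are zeros followed by the cls segment id 2, matching the two methods above.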
| 710 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCamelCase__ : Union[str, Any] = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : Optional[Any] = {'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
UpperCamelCase__ : Any = [
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'''emoji''': True,
},
}
]
UpperCamelCase__ : Union[str, Any] = 0
for log in Path().glob('''*.log'''):
UpperCamelCase__ : Optional[int] = 0
with open(log, '''r''') as f:
for line in f:
UpperCamelCase__ : Any = json.loads(line)
if line.get('''nodeid''', '''''') != "":
UpperCamelCase__ : Tuple = line['''nodeid''']
if line.get('''duration''', None) is not None:
UpperCamelCase__ : List[Any] = F"""{line["duration"]:.4f}"""
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCamelCase__ : Tuple = []
log.unlink()
UpperCamelCase__ : List[Any] = ''''''
UpperCamelCase__ : List[str] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : Optional[int] = {}
for test in failed_tests:
UpperCamelCase__ : str = test[0].split('''::''')
UpperCamelCase__ : List[Any] = data[0].split('''/''')[-1]
if data[0] not in filesafailed:
UpperCamelCase__ : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCamelCase__ : str = [test[0] for test in failed_table]
UpperCamelCase__ : Union[str, Any] = list(set(files))
# Count number of instances in failed_tests
UpperCamelCase__ : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCamelCase__ : str = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 30_00:
UpperCamelCase__ : List[Any] = '''Too many failed tests, please see the full report in the Action results.'''
UpperCamelCase__ : Optional[Any] = len(err) + 10
UpperCamelCase__ : List[str] = message[: 30_00 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
UpperCamelCase__ : Optional[Any] = '''No failed tests! 🤗'''
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
UpperCamelCase__ : int = WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
UpperCamelCase__ : Optional[int] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCamelCase__ : Optional[Any] = {
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCamelCase__ : Tuple = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
UpperCamelCase__ : Any = response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCamelCase__ : int = ''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCamelCase__ : str = row[0]
else:
UpperCamelCase__ : str = ''''''
UpperCamelCase__ : Optional[Any] = {
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 685 | 0 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[int] = "Wav2Vec2FeatureExtractor"
__a : List[str] = "AutoTokenizer"
def __init__( self ,snake_case__ ,snake_case__ ):
super().__init__(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = self.feature_extractor
SCREAMING_SNAKE_CASE_ : Any = False
@classmethod
def snake_case ( cls ,snake_case__ ,**snake_case__ ):
try:
return super().from_pretrained(snake_case__ ,**snake_case__ )
except OSError:
warnings.warn(
F'Loading a tokenizer inside {cls.__name__} from a config that does not'
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' ,snake_case__ ,)
SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = WavaVecaCTCTokenizer.from_pretrained(snake_case__ ,**snake_case__ )
return cls(feature_extractor=snake_case__ ,tokenizer=snake_case__ )
def __call__( self ,*snake_case__ ,**snake_case__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*snake_case__ ,**snake_case__ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
SCREAMING_SNAKE_CASE_ : Tuple = kwargs.pop('raw_speech' )
else:
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('audio' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = kwargs.pop('sampling_rate' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = kwargs.pop('text' ,snake_case__ )
if len(snake_case__ ) > 0:
SCREAMING_SNAKE_CASE_ : str = args[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
SCREAMING_SNAKE_CASE_ : int = self.feature_extractor(snake_case__ ,*snake_case__ ,sampling_rate=snake_case__ ,**snake_case__ )
if text is not None:
SCREAMING_SNAKE_CASE_ : int = self.tokenizer(snake_case__ ,**snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ : Optional[int] = encodings['input_ids']
return inputs
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = kwargs.pop('input_features' ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = kwargs.pop('labels' ,snake_case__ )
if len(snake_case__ ) > 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = args[0]
SCREAMING_SNAKE_CASE_ : str = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor.pad(snake_case__ ,*snake_case__ ,**snake_case__ )
if labels is not None:
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer.pad(snake_case__ ,**snake_case__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE_ : str = labels['input_ids']
return input_features
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
return self.tokenizer.batch_decode(*snake_case__ ,**snake_case__ )
def snake_case ( self ,*snake_case__ ,**snake_case__ ):
return self.tokenizer.decode(*snake_case__ ,**snake_case__ )
@contextmanager
def snake_case ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer
yield
SCREAMING_SNAKE_CASE_ : Any = self.feature_extractor
SCREAMING_SNAKE_CASE_ : int = False
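# Typical call pattern (illustrative; the checkpoint name is only an example and
# the class is the Wav2Vec2 processor defined above):
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD").input_ids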
| 711 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError('Input value must be an \'int\' type' )
SCREAMING_SNAKE_CASE_ : Tuple = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
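# Added commentary: the loop counts how many right shifts empty the number, i.e.
# the index of its most significant set bit; for positive ints this matches the
# built-in int.bit_length(), e.g. 45 == 0b101101 yields 6 either way.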
| 685 | 0 |
from math import factorial
UpperCamelCase__ : Optional[Any] = {str(d): factorial(d) for d in range(10)}
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> int:
"""simple docstring"""
return sum(DIGIT_FACTORIAL[d] for d in str(lowerCamelCase_ ) )
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 7 * factorial(9 ) + 1
return sum(i for i in range(3 , lowerCamelCase_ ) if sum_of_digit_factorial(lowerCamelCase_ ) == i )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 712 |
import qiskit
def __UpperCAmelCase ( lowerCamelCase_ : int = 2 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = qubits
# Using Aer's simulator
SCREAMING_SNAKE_CASE_ : Optional[int] = qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE_ : str = qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , lowerCamelCase_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , lowerCamelCase_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(lowerCamelCase_ ) ) , list(range(lowerCamelCase_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
SCREAMING_SNAKE_CASE_ : Tuple = qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(F"""Total count for various states are: {quantum_entanglement(3)}""")
| 685 | 0 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
UpperCamelCase__ : Dict = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : Optional[float] = field(
default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
__a : bool = field(default=lowerCamelCase_ , metadata={"help": "Whether to SortishSamler or not."} )
__a : bool = field(
default=lowerCamelCase_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
__a : bool = field(default=lowerCamelCase_ , metadata={"help": "whether to use adafactor"} )
__a : Optional[float] = field(
default=lowerCamelCase_ , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
__a : Optional[float] = field(
default=lowerCamelCase_ , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
__a : Optional[float] = field(default=lowerCamelCase_ , metadata={"help": "Dropout probability. Goes into model.config."} )
__a : Optional[float] = field(
default=lowerCamelCase_ , metadata={"help": "Attention dropout probability. Goes into model.config."} )
__a : Optional[str] = field(
default="linear" , metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 713 |
def __UpperCAmelCase ( lowerCamelCase_ : int ) -> bool:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError('check_bouncy() accepts only integer arguments' )
SCREAMING_SNAKE_CASE_ : Optional[int] = str(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = ''.join(sorted(lowerCamelCase_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def __UpperCAmelCase ( lowerCamelCase_ : float = 99 ) -> int:
"""simple docstring"""
if not 0 < percent < 1_00:
raise ValueError('solution() only accepts values from 0 to 100' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 0
SCREAMING_SNAKE_CASE_ : Dict = 1
while True:
if check_bouncy(lowerCamelCase_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 685 | 0 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 714 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ : Dict = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : Any = ['''ChineseCLIPFeatureExtractor''']
UpperCamelCase__ : Optional[int] = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : List[Any] = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 685 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self ,image_processor=None ,tokenizer=None ,**kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' ,FutureWarning ,)
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor ,tokenizer )
        self.current_processor = self.image_processor

    def __call__( self ,text=None ,images=None ,return_tensors=None ,**kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text ,return_tensors=return_tensors ,**kwargs )
        if images is not None:
            image_features = self.image_processor(images ,return_tensors=return_tensors ,**kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) ,tensor_type=return_tensors )

    def batch_decode( self ,*args ,**kwargs ):
        return self.tokenizer.batch_decode(*args ,**kwargs )

    def decode( self ,*args ,**kwargs ):
        return self.tokenizer.decode(*args ,**kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' ,FutureWarning ,)
        return self.image_processor_class
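# A hedged usage sketch for the processor above: the checkpoint name
# 'OFA-Sys/chinese-clip-vit-base-patch16' and Hub access are assumptions; any
# Chinese-CLIP checkpoint with a saved processor config should behave the same.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import ChineseCLIPProcessor

    processor = ChineseCLIPProcessor.from_pretrained('OFA-Sys/chinese-clip-vit-base-patch16' )
    image = Image.fromarray(np.zeros((224, 224, 3) ,dtype=np.uint8 ) )
    inputs = processor(text=['一只猫'] ,images=image ,return_tensors='pt' )
    print(sorted(inputs.keys() ) )  # includes input_ids, attention_mask and pixel_values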
| 715 |
def actual_power( a: int , b: int ) -> int:
    """Computes a**b for b >= 0 by recursive squaring, using O(log b) multiplications."""
    if b == 0:
        return 1
    half = actual_power(a , int(b / 2 ) )  # compute a**(b // 2) once instead of twice
    if (b % 2) == 0:
        return half * half
    else:
        return a * half * half


def power( a: int , b: int ) -> float:
    """Extends `actual_power` to negative exponents, since a**(-b) == 1 / a**b."""
    if b < 0:
        return 1 / actual_power(a , -b )
    return actual_power(a , b )
if __name__ == "__main__":
print(power(-2, -3))
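# Quick sanity checks for the helpers above; the expected values follow directly
# from the definitions (exact integers for b >= 0, floats for negative exponents).
assert actual_power(2 , 10 ) == 1024
assert power(3 , 0 ) == 1
assert power(2 , -2 ) == 0.25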
| 685 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
'<unk>',
'<cls>',
'<sep>',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self ,**kwargs ):
        return FunnelTokenizer.from_pretrained(self.tmpdirname ,**kwargs )

    def get_rust_tokenizer( self ,**kwargs ):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[7, 4, 5, 10, 8, 9] )
    def test_token_type_ids( self ):
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer('UNwant\u00E9d,running' )
            sentence_len = len(inputs['input_ids'] ) - 1
            self.assertListEqual(inputs['token_type_ids'] ,[2] + [0] * sentence_len )
            inputs = tokenizer('UNwant\u00E9d,running' ,'UNwant\u00E9d,running' )
            self.assertListEqual(inputs['token_type_ids'] ,[2] + [0] * sentence_len + [1] * sentence_len )
| 716 |
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=False ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return LlamaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
    def create_and_check_model( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,)
        result = model(
            input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,)
        result = model(input_ids ,attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,use_cache=True ,)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] ,dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] ,dim=-1 )
        output_from_no_past = model(
            next_input_ids ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,output_hidden_states=True ,)['hidden_states'][0]
        output_from_past = model(
            next_tokens ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,past_key_values=past_key_values ,output_hidden_states=True ,)['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice ,output_from_past_slice ,atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=LlamaConfig ,hidden_size=37 )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_llama_sequence_classification_model( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_single_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
    def test_save_load_fast_init_from_base( self ):
        pass
@parameterized.expand([('linear',), ('dynamic',)] )
    def test_model_rope_scaling( self ,scaling_type ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] ,config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'type': scaling_type, 'factor': 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output ,scaled_short_output ,atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output ,scaled_short_output ,atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output ,scaled_long_output ,atol=1E-5 ) )
@require_torch
class LlamaIntegrationTest( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
    def test_model_7b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1E-2 ,rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
    def test_model_13b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1E-2 ,rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
    def test_model_13bf_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1E-2 ,rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1E-2 ,rtol=1E-2 )
    @unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
    def test_model_70b_logits( self ):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.float32 )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1E-2 ,rtol=1E-2 )
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1E-5 ,rtol=1E-5 )
    @unittest.skip('Model is currently gated' )
@slow
    def test_model_13b_greedy_generation( self ):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = 'Simply put, the theory of relativity states that '
        tokenizer = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
        input_ids = tokenizer.encode(prompt ,return_tensors='pt' )
        model = LlamaForCausalLM.from_pretrained(
            'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids ,max_new_tokens=64 ,top_p=None ,temperature=1 ,do_sample=False )
        text = tokenizer.decode(generated_ids[0] ,skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION ,text )
| 685 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts( self ,tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[9, 6, 7, 12, 10, 11] )
    def test_chinese( self ):
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] )
    def test_basic_tokenizer_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=True ,strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] )
    def test_basic_tokenizer_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=True ,strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
    def test_basic_tokenizer_lower_strip_accents_default( self ):
        tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
    def test_basic_tokenizer_no_lower( self ):
        tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_false( self ):
        tokenizer = BasicTokenizer(do_lower_case=False ,strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_no_lower_strip_accents_true( self ):
        tokenizer = BasicTokenizer(do_lower_case=False ,strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
    def test_basic_tokenizer_respects_never_split_tokens( self ):
        tokenizer = BasicTokenizer(do_lower_case=False ,never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
    def test_wordpiece_tokenizer( self ):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
@require_torch
    def test_prepare_batch( self ):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text ,padding=True ,return_tensors='pt' )
        self.assertIsInstance(batch ,BatchEncoding )
        result = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(expected_src_tokens ,result )
        self.assertEqual((2, 9) ,batch.input_ids.shape )
        self.assertEqual((2, 9) ,batch.attention_mask.shape )
    def test_is_whitespace( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
@slow
    def test_sequence_builders( self ):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' )
        text = tokenizer.encode('sequence builders' ,add_special_tokens=False )
        text_a = tokenizer.encode('multi-sequence build' ,add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text ,text_a )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 717 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''abeja/gpt-neox-japanese-2.7b''': 20_48,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    """Loads a vocabulary file and an emoji file into dictionaries."""
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self ,vocab_file ,emoji_file ,unk_token="<|endoftext|>" ,pad_token="<|endoftext|>" ,bos_token="<|startoftext|>" ,eos_token="<|endoftext|>" ,do_clean_text=False ,**kwargs ,):
        super().__init__(
            unk_token=unk_token ,pad_token=pad_token ,bos_token=bos_token ,eos_token=eos_token ,do_clean_text=do_clean_text ,**kwargs ,)
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file ,emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab ,ids_to_tokens=self.ids_to_tokens ,emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )

    def get_vocab( self ):
        return dict(self.raw_vocab ,**self.added_tokens_encoder )

    def _tokenize( self ,text ):
        return self.subword_tokenizer.tokenize(text ,clean=self.do_clean_text )

    def _convert_token_to_id( self ,token ):
        return self.vocab.get(token ,self.vocab.get(self.unk_token ) )

    def _convert_id_to_token( self ,index ):
        return self.subword_tokenizer.convert_id_to_token(index )

    def convert_tokens_to_string( self ,tokens ):
        out_string = ''.join(tokens ).strip()
        return out_string

    def _build_conversation_input_ids( self ,conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text ,add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self ,save_directory ,filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file ,'w' ,encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file ,'w' ,encoding='utf-8' ) as writer:
            json.dump(self.emoji ,writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object ):
    def __init__( self ,vocab ,ids_to_tokens ,emoji ):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text( self ,content ):
        content = self.content_repatter1.sub('<URL>' ,content )
        content = self.content_repatter2.sub('<EMAIL>' ,content )
        content = self.content_repatter3.sub('<TEL>' ,content )
        content = self.content_repatter4.sub('<DATE>' ,content )
        content = self.content_repatter5.sub('<DATE>' ,content )
        content = self.content_repatter6.sub('<PRICE>' ,content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' ,'<BLOCK>' )
        return content
    def tokenize( self ,text ,clean=False ):
        text = text.replace(' ' ,'<SP>' )
        text = text.replace(' ' ,'<SP>' )
        text = text.replace('\r\n' ,'<BR>' )
        text = text.replace('\n' ,'<BR>' )
        text = text.replace('\r' ,'<BR>' )
        text = text.replace('\t' ,'<TAB>' )
        text = text.replace('—' ,'ー' )
        text = text.replace('−' ,'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k ,v )
        if clean:
            text = self.clean_text(text )

        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0XC2A1 and c <= 0XC2BF)
                    or (c >= 0XC780 and c <= 0XC783)
                    or (c >= 0XCAB9 and c <= 0XCBBF)
                    or (c >= 0XCC80 and c <= 0XCDA2)
                ):
                    return True
            return False

        def checku2e(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0XE2_8080 and c <= 0XE2_B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) ,pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end ,pos ,-1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates ,key=lambda x: x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checku2e(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
    def convert_id_to_token( self ,index ,breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' ,errors='replace' ) )
        text = ''.join(words )
        return text
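# A hedged usage sketch for the tokenizer above: it assumes network access to the
# Hugging Face Hub for the 'abeja/gpt-neox-japanese-2.7b' files referenced in
# PRETRAINED_VOCAB_FILES_MAP, so treat it as illustrative rather than part of the module.
if __name__ == "__main__":
    from transformers import GPTNeoXJapaneseTokenizer

    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained('abeja/gpt-neox-japanese-2.7b' )
    ids = tokenizer.encode('吾輩は猫である。' )
    print(ids )
    print(tokenizer.decode(ids ) )  # unknown characters round-trip through <|byte..|> tokens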
| 685 | 0 |
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func: Callable , y0: float , x0: float , x_end: float , step_size: float ) -> np.ndarray:
    """Solves an ODE y' = f(x, y) with Heun's (modified Euler) predictor-corrector method."""
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        y_predict = y[k] + step_size * ode_func(x , y[k] )  # explicit Euler predictor
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
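# A short demonstration of `euler_modified` above: integrating y' = y with
# y(0) = 1 over [0, 1] should land close to e, since Heun's method is second
# order in the step size.
if __name__ == "__main__":
    ys = euler_modified(lambda x, y: y , 1.0 , 0.0 , 1.0 , 0.001 )
    print(ys[-1] , abs(ys[-1] - float(np.e ) ) )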
| 718 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher( function , args=() , num_processes=None , mixed_precision="no" , use_port="29500" ):
    """Launches a training function inside the current notebook, spawning multiple processes when possible."""
    in_colab = False
    in_kaggle = False
    if any(key.startswith('KAGGLE' ) for key in os.environ.keys() ):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = 'google.colab' in str(sys.modules['IPython'].get_ipython() )
    try:
        mixed_precision = PrecisionType(mixed_precision.lower() )
    except ValueError:
        raise ValueError(
            F'Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.' )
    if (in_colab or in_kaggle) and (os.environ.get('TPU_NAME' , None ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '
'your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function , distributed_type='TPU' )
        print(F'Launching a training on {num_processes} TPU cores.' )
        xmp.spawn(launcher , args=args , nprocs=num_processes , start_method='fork' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on one CPU.' )
        function(*args )
else:
if num_processes is None:
raise ValueError(
'You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '
'inside your training function. Restart your notebook and make sure no cells initializes an '
'`Accelerator`.' )
if torch.cuda.is_initialized():
raise ValueError(
'To launch a multi-GPU training from your notebook, you need to avoid running any instruction '
'using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '
'function.' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes , master_addr='127.0.01' , master_port=use_port , mixed_precision=mixed_precision ):
                launcher = PrepareForLaunch(function , distributed_type='MULTI_GPU' )
                print(F'Launching training on {num_processes} GPUs.' )
                try:
                    start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '
'This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '
'Please review your imports and test them when running the `notebook_launcher()` to identify '
'which one is problematic.' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
            os.environ['PYTORCH_ENABLE_MPS_FALLBACK'] = '1'
print('Launching training on MPS.' )
elif torch.cuda.is_available():
print('Launching training on one GPU.' )
else:
print('Launching training on CPU.' )
        function(*args )
def debug_launcher( function , args=() , num_processes=2 ):
    """Launches a training function using several processes on CPU for debugging purposes."""
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
            world_size=num_processes , master_addr='127.0.01' , master_port='29500' , accelerate_mixed_precision='no' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='yes' , ):
            launcher = PrepareForLaunch(function , debug=True )
            start_processes(launcher , args=args , nprocs=num_processes , start_method='fork' )
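# A minimal, hedged usage sketch: with `num_processes=1` the launcher falls through
# to the single-process branch above and simply calls the function, so this is safe
# to run on a CPU-only machine; multi-GPU runs only need `num_processes` changed.
if __name__ == "__main__":

    def _demo_training_fn():
        print('hello from the launched training function' )

    notebook_launcher(_demo_training_fn , num_processes=1 )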
| 685 | 0 |
import math
import os
import sys
def read_file_binary( file_path: str ) -> str:
    """Reads a file as bytes and returns its contents as a string of bits."""
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = F'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def add_key_to_lexicon( lexicon: dict , curr_string: str , index: int , last_match_id: str ) -> None:
    """Extends the lexicon with the two children of the phrase that was just matched."""
    lexicon.pop(curr_string )
    lexicon[curr_string + "0"] = last_match_id
    if math.log2(index ).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = '0' + lexicon[curr_key]
    lexicon[curr_string + "1"] = bin(index )[2:]
def compress_data( data_bits: str ) -> str:
    """Compresses a string of bits using the Lempel-Ziv algorithm and returns the result."""
    lexicon = {'0': '0', '1': '1'}
    result, curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon , curr_string , index , last_match_id )
        index += 1
        curr_string = ''
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length( source_path: str , compressed: str ) -> str:
    """Prepends the original file length (as a length-prefixed binary header) to the compressed bits."""
    file_length = os.path.getsize(source_path )
    file_length_binary = bin(file_length )[2:]
    length_length = len(file_length_binary )
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary( file_path: str , to_write: str ) -> None:
    """Packs the bit string into bytes (with a stop marker) and writes them to disk."""
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def compress( source_path: str , destination_path: str ) -> None:
    """Reads the source file, compresses it and writes the result to the destination."""
    data_bits = read_file_binary(source_path )
    compressed = compress_data(data_bits )
    compressed = add_file_length(source_path , compressed )
    write_file_binary(destination_path , compressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
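# A small self-check for `compress_data` above (not run automatically, to keep the
# CLI behaviour of this script unchanged): the sample bit string is arbitrary, and
# the exact output codes depend on how the lexicon grows.
def _demo_compress_data() -> None:
    sample_bits = '0100100010011'
    codes = compress_data(sample_bits )
    assert set(codes ) <= {'0', '1'}
    print(F'{sample_bits} -> {codes}' )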
| 719 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
UpperCamelCase__ : Tuple = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
UpperCamelCase__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 685 | 0 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint , config ):
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint['encoder.conv_in.weight'] = vae_state_dict['encoder.conv_in.weight']
    new_checkpoint['encoder.conv_in.bias'] = vae_state_dict['encoder.conv_in.bias']
    new_checkpoint['encoder.conv_out.weight'] = vae_state_dict['encoder.conv_out.weight']
    new_checkpoint['encoder.conv_out.bias'] = vae_state_dict['encoder.conv_out.bias']
    new_checkpoint['encoder.conv_norm_out.weight'] = vae_state_dict['encoder.norm_out.weight']
    new_checkpoint['encoder.conv_norm_out.bias'] = vae_state_dict['encoder.norm_out.bias']
    new_checkpoint['decoder.conv_in.weight'] = vae_state_dict['decoder.conv_in.weight']
    new_checkpoint['decoder.conv_in.bias'] = vae_state_dict['decoder.conv_in.bias']
    new_checkpoint['decoder.conv_out.weight'] = vae_state_dict['decoder.conv_out.weight']
    new_checkpoint['decoder.conv_out.bias'] = vae_state_dict['decoder.conv_out.bias']
    new_checkpoint['decoder.conv_norm_out.weight'] = vae_state_dict['decoder.norm_out.weight']
    new_checkpoint['decoder.conv_norm_out.bias'] = vae_state_dict['decoder.norm_out.bias']
    new_checkpoint['quant_conv.weight'] = vae_state_dict['quant_conv.weight']
    new_checkpoint['quant_conv.bias'] = vae_state_dict['quant_conv.bias']
    new_checkpoint['post_quant_conv.weight'] = vae_state_dict['post_quant_conv.weight']
    new_checkpoint['post_quant_conv.bias'] = vae_state_dict['post_quant_conv.bias']
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if F'down.{layer_id}' in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if F'up.{layer_id}' in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if F'down.{i}' in key and F'down.{i}.downsample' not in key]
        if F'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.weight'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.weight' )
            new_checkpoint[F'encoder.down_blocks.{i}.downsamplers.0.conv.bias'] = vae_state_dict.pop(
                F'encoder.down.{i}.downsample.conv.bias' )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': F'down.{i}.block', 'new': F'down_blocks.{i}.resnets'}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F'encoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if F'up.{block_id}' in key and F'up.{block_id}.upsample' not in key
        ]
        if F'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.weight'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.weight'
            ]
            new_checkpoint[F'decoder.up_blocks.{i}.upsamplers.0.conv.bias'] = vae_state_dict[
                F'decoder.up.{block_id}.upsample.conv.bias'
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': F'up.{block_id}.block', 'new': F'up_blocks.{i}.resnets'}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if F'decoder.mid.block_{i}' in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': F'mid.block_{i}', 'new': F'mid_block.resnets.{i - 1}'}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path: str , output_path: str , ) -> None:
    """Converts an LDM-style VAE checkpoint into a diffusers AutoencoderKL folder."""
    # Only support V1
    r = requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if checkpoint_path.endswith('safetensors' ):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path , framework='pt' , device='cpu' ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )['state_dict']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
UpperCamelCase__ : Dict = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to store the converted VAE model.''')
UpperCamelCase__ : str = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
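# ---------------------------------------------------------------------------
# Hedged usage sketch (added; the script name and paths below are
# placeholders, not from the original file). A conversion run followed by a
# round-trip load might look like:
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path ./vae.safetensors --dump_path ./converted_vae
#
#   from diffusers import AutoencoderKL
#   vae = AutoencoderKL.from_pretrained("./converted_vae")
# ---------------------------------------------------------------------------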
| 720 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
__a : Union[str, Any] = CLIPTokenizer
__a : List[str] = CLIPTokenizerFast
__a : List[str] = True
__a : Tuple = {}
__a : Tuple = False
def snake_case ( self ):
super().setUp()
# fmt: off
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(snake_case__ ,range(len(snake_case__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Any = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
SCREAMING_SNAKE_CASE_ : Any = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(snake_case__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(snake_case__ ) )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,**snake_case__ ):
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**snake_case__ )
def snake_case ( self ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Tuple = 'lower newer'
return input_text, output_text
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = CLIPTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : List[Any] = 'lower newer'
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
SCREAMING_SNAKE_CASE_ : Any = tokenizer.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) ,snake_case__ )
@require_ftfy
def snake_case ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(snake_case__ ,**snake_case__ )
SCREAMING_SNAKE_CASE_ : Dict = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
SCREAMING_SNAKE_CASE_ : Dict = 'xa\u0303y' + ' ' + 'x\xe3y'
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of space type
SCREAMING_SNAKE_CASE_ : Optional[Any] = [
'\u0009', # (horizontal tab, '\t')
'\u000B', # (vertical tab)
'\u000C', # (form feed)
'\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark)
'\u200F', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
# Test that the tokenization is identical on unicode of line break type
SCREAMING_SNAKE_CASE_ : Tuple = [
'\u000A', # (line feed, '\n')
'\r\n', # (carriage return and line feed, '\r\n')
'\u000D', # (carriage return, '\r')
'\r', # (carriage return, '\r')
'\u000D', # (carriage return, '\r')
'\u2028', # (line separator)
'\u2029', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_s.tokenize(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r.tokenize(snake_case__ )
self.assertListEqual(snake_case__ ,snake_case__ )
def snake_case ( self ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'{text_of_1_token} {text_of_1_token}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : str = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(0, len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(len(snake_case__ ) + 1, len(snake_case__ ) + 1 + len(snake_case__ )) ,)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = F' {text}'
SCREAMING_SNAKE_CASE_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
snake_case__ ,use_fast=snake_case__ ,)
SCREAMING_SNAKE_CASE_ : int = tokenizer_r(snake_case__ ,return_offsets_mapping=snake_case__ ,add_special_tokens=snake_case__ )
self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(snake_case__ )) )
self.assertEqual(
encoding.offset_mapping[1] ,(1 + len(snake_case__ ) + 1, 1 + len(snake_case__ ) + 1 + len(snake_case__ )) ,)
def snake_case ( self ):
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(snake_case__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def snake_case ( self ):
super().test_tokenization_python_rust_equals()
def snake_case ( self ):
# CLIP always lower cases letters
pass
| 685 | 0 |
UpperCamelCase__ : Any = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [False] * len(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = [s]
SCREAMING_SNAKE_CASE_ : Dict = True
while queue:
SCREAMING_SNAKE_CASE_ : Optional[int] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = u
return visited[t]
def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = [-1] * (len(lowerCamelCase_ ))
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : List[str] = []
SCREAMING_SNAKE_CASE_ : str = [i[:] for i in graph] # Record original cut, copy.
while bfs(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = float('Inf' )
SCREAMING_SNAKE_CASE_ : List[str] = sink
while s != source:
# Find the minimum value in select path
SCREAMING_SNAKE_CASE_ : int = min(lowerCamelCase_ , graph[parent[s]][s] )
SCREAMING_SNAKE_CASE_ : Tuple = parent[s]
max_flow += path_flow
SCREAMING_SNAKE_CASE_ : List[Any] = sink
while v != source:
SCREAMING_SNAKE_CASE_ : Tuple = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
SCREAMING_SNAKE_CASE_ : int = parent[v]
for i in range(len(lowerCamelCase_ ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
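# ---------------------------------------------------------------------------
# Hedged companion sketch (added for clarity; not part of the original
# sample). It restates the BFS augmenting-path idea above with descriptive
# names and returns the max-flow *value* rather than the cut edges. The name
# `test_graph` mirrors the one used in the __main__ guard above; for those
# CLRS-style capacities the maximum flow is 23.
# ---------------------------------------------------------------------------
from collections import deque


def _edmonds_karp(capacity, source, sink):
    """Maximum flow via shortest augmenting paths on a residual matrix."""
    n = len(capacity)
    residual = [row[:] for row in capacity]  # work on a copy
    max_flow = 0
    while True:
        # BFS for an augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left: flow is maximal
            return max_flow
        # bottleneck capacity along the BFS path
        path_flow = float("inf")
        v = sink
        while v != source:
            path_flow = min(path_flow, residual[parent[v]][v])
            v = parent[v]
        # push the flow, updating forward and reverse residual edges
        v = sink
        while v != source:
            residual[parent[v]][v] -= path_flow
            residual[v][parent[v]] += path_flow
            v = parent[v]
        max_flow += path_flow


if __name__ == "__main__":
    print(_edmonds_karp(test_graph, source=0, sink=5))  # expected: 23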
| 721 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' )
SCREAMING_SNAKE_CASE_ : int = parser.add_subparsers(help='transformers-cli command helpers' )
# Register commands
ConvertCommand.register_subcommand(lowerCamelCase_ )
DownloadCommand.register_subcommand(lowerCamelCase_ )
EnvironmentCommand.register_subcommand(lowerCamelCase_ )
RunCommand.register_subcommand(lowerCamelCase_ )
ServeCommand.register_subcommand(lowerCamelCase_ )
UserCommands.register_subcommand(lowerCamelCase_ )
AddNewModelCommand.register_subcommand(lowerCamelCase_ )
AddNewModelLikeCommand.register_subcommand(lowerCamelCase_ )
LfsCommands.register_subcommand(lowerCamelCase_ )
PTtoTFCommand.register_subcommand(lowerCamelCase_ )
# Let's go
SCREAMING_SNAKE_CASE_ : Optional[int] = parser.parse_args()
if not hasattr(lowerCamelCase_ , 'func' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE_ : Optional[Any] = args.func(lowerCamelCase_ )
service.run()
if __name__ == "__main__":
main()
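# ---------------------------------------------------------------------------
# Hedged extension sketch (added; the `hello` command and everything about it
# is invented). New subcommands follow the same contract as the classes
# imported above: implement register_subcommand()/run() and register a
# factory via set_defaults(func=...), which main() dispatches to as
# args.func(args).
#
#   from argparse import ArgumentParser
#   from transformers.commands import BaseTransformersCLICommand
#
#   class HelloCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           hello_parser = parser.add_parser("hello", help="Print a greeting.")
#           hello_parser.add_argument("--name", type=str, default="world")
#           hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))
#
#       def __init__(self, name: str):
#           self._name = name
#
#       def run(self):
#           print(f"Hello, {self._name}!")
# ---------------------------------------------------------------------------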
| 685 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase :str = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :str = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
lowerCamelCase :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 686 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. This issue has been examined through a theory called the power law, which posits a correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
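# Worked example (added; not part of the original metric file). For the pair
#   reference:  "this is the reference"    prediction: "this is the prediction"
# alignment gives S=1 ("reference" -> "prediction"), D=0, I=0, C=3, N=4.
# For "there is another one" vs. "there is an other sample": S=2, I=1, D=0,
# C=2, N=4, i.e. 3 errors. Pooled over both pairs, as in the iterative branch
# of compute below: (1 + 3) / (4 + 4) = 0.5, matching the docstring example.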
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _a (self , lowercase=None , lowercase=None , lowercase=False ):
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
A_ : List[Any] = 0
A_ : Optional[int] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Any = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 686 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase :Any = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Optional[Any] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
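# Note (added): the _LazyModule assignment at the bottom replaces this module
# in sys.modules, so `from transformers.models.longt5 import LongT5Model`
# only imports torch-dependent code when the symbol is first resolved; the
# TYPE_CHECKING branch below exists purely for static type checkers.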
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 686 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
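# Hedged usage sketch (added; the argument values are copied from the slow
# tests below, but the snippet itself is mine and untested as written;
# `init_image` stands in for any PIL image):
#
#   pipe = CycleDiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4",
#       scheduler=DDIMScheduler.from_pretrained(
#           "CompVis/stable-diffusion-v1-4", subfolder="scheduler"
#       ),
#       safety_checker=None,
#   )
#   out = pipe(
#       prompt="A blue colored car", source_prompt="A black colored car",
#       image=init_image, num_inference_steps=100, eta=0.1, strength=0.85,
#       guidance_scale=3, source_guidance_scale=1, output_type="np",
#   )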
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
| 686 | 1 |
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase :str = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = GPTSwaTokenizer
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : List[Any] = True
__SCREAMING_SNAKE_CASE : List[Any] = False
def _a (self ):
super().setUp()
# We have a SentencePiece fixture for testing
A_ : Union[str, Any] = GPTSwaTokenizer(lowercase , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self , lowercase ):
A_ : List[str] = """This is a test"""
A_ : List[str] = """This is a test"""
return input_text, output_text
def _a (self ):
A_ : str = """<s>"""
A_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase )
def _a (self ):
A_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """j""" )
self.assertEqual(len(lowercase ) , 2000 )
def _a (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 2000 )
def _a (self ):
A_ : int = GPTSwaTokenizer(lowercase )
A_ : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [465, 287, 265, 631, 842] )
A_ : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
# fmt: off
self.assertListEqual(
lowercase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , )
# fmt: on
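        # Note (added): with SentencePiece byte fallback, characters absent
        # from the vocab are emitted as UTF-8 byte tokens -- the digit "9"
        # becomes <0x39>, and "é" becomes its two-byte sequence <0xC3> <0xA9>.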
A_ : List[Any] = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
A_ : Optional[int] = tokenizer.convert_ids_to_tokens(lowercase )
# fmt: off
self.assertListEqual(
lowercase , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] )
# fmt: on
def _a (self ):
A_ : List[Any] = GPTSwaTokenizer(lowercase )
A_ : Optional[int] = ["""This is a test""", """I was born in 92000, and this is falsé."""]
A_ : Union[str, Any] = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(lowercase , lowercase ):
self.assertListEqual(tokenizer.encode_fast(lowercase ) , lowercase )
# Test that decode_fast returns the input text
for text, token_ids in zip(lowercase , lowercase ):
self.assertEqual(tokenizer.decode_fast(lowercase ) , lowercase )
@slow
def _a (self ):
A_ : Union[str, Any] = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Häj sväjs lillebrör! =)""",
"""Det är inget fel på Mr. Cool""",
]
# fmt: off
A_ : Dict = {"""input_ids""": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=lowercase , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=lowercase , )
| 686 |
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
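# Note (added, based on the shared UNetBlockTesterMixin's typical behavior;
# hedged, since the mixin lives outside this file): each test_output call
# builds the block from prepare_init_args_and_inputs_for_common(), runs one
# forward pass on a fixed-seed dummy input, and compares a flattened slice of
# the output against the hard-coded reference values.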
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(lowercase )
| 686 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, while keeping the
# full vocab and merges files, which results in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
lowerCamelCase :Dict = '''facebook/wmt19-en-de'''
lowerCamelCase :Optional[int] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
lowerCamelCase :Union[str, Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
lowerCamelCase :Tuple = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
lowerCamelCase :Union[str, Any] = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
lowerCamelCase :Any = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
lowerCamelCase :str = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 686 |
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
A_ : List[Any] = word_bank or []
# create a table
A_ : int = len(lowerCamelCase__ ) + 1
A_ : list[list[list[str]]] = []
for _ in range(lowerCamelCase__ ):
table.append([] )
# seed value
A_ : Any = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(lowerCamelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCamelCase__ )] == word:
A_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(lowerCamelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCamelCase__ )]:
combination.reverse()
return table[len(lowerCamelCase__ )]
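# Worked micro-example (added): for target "ab" and word_bank ["a", "b", "ab"]
# the table evolves as
#   table[0] = [[]]                    (seed: the empty combination)
#   table[1] = [["a"]]                 ("a" matches at index 0)
#   table[2] = [["ab"], ["b", "a"]]    ("ab" at index 0; "b" extends table[1])
# and each combination is then reversed, so the call returns
#   [["ab"], ["a", "b"]]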
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 686 | 1 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase :Optional[int] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2
A_ : List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ )
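# Note (added): with sigma = sin(t*pi/2)^2 and alpha = sqrt(1 - sigma^2),
# alpha_sigma_to_t recovers a timestep as atan2(sigma, alpha) * 2 / pi, so
# t=0 maps to (alpha, sigma) = (1, 0) and t=1 maps to (0, 1) -- the "crash"
# noise schedule used below to drive the reference iPLMS sampler.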
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase ):
super().__init__()
A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 )
A_ : str = deepcopy(self.diffusion )
A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = MODELS_MAP[model_name]["""url"""]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase :str = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase :int = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase :List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase :Optional[Any] = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return name.replace(lowerCamelCase__ , lowerCamelCase__ )
elif name.startswith(lowerCamelCase__ ):
return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( lowerCamelCase__ , lowerCamelCase__=13 ):
'''simple docstring'''
A_ : Union[str, Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
A_ : Dict = 0
if string.startswith("""net.3.""" ):
depth += 1
A_ : int = string[6:]
elif string.startswith("""net.""" ):
A_ : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
A_ : Dict = string[7:]
if string.startswith("""main.""" ):
A_ : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
A_ : Optional[Any] = string[:2]
A_ : Optional[Any] = string[2:]
else:
A_ : List[Any] = string[0]
A_ : Dict = string[1:]
if depth == max_depth:
A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num]
A_ : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowerCamelCase__ ) < 7:
A_ : Any = DOWN_NUM_TO_LAYER[layer_num]
A_ : Union[str, Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCamelCase__ ) > 7:
A_ : List[str] = UP_NUM_TO_LAYER[layer_num]
A_ : List[str] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
A_ : str = DEPTH_0_TO_LAYER[layer_num]
A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
A_ : Optional[int] = string_left[1:]
if "resnets" in new_layer:
A_ : Tuple = convert_resconv_naming(lowerCamelCase__ )
elif "attentions" in new_layer:
A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ )
A_ : Dict = new_string_left
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
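# Worked example (added, traced by hand through the tables above; hedged):
#   rename("net.1.main.0.weight")
# parses depth 0 and layer "1", which DEPTH_0_TO_LAYER maps to "resnets.1"
# under the "down_blocks.0" prefix (since 1 <= 3), and convert_resconv_naming
# turns the "main.0" suffix into "conv_1", yielding
#   "down_blocks.0.resnets.1.conv_1.weight"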
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
            # up- and downsample layers don't have trainable weights
continue
A_ : List[Any] = rename(lowerCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = v
return new_state_dict
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if len(lowerCamelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
A_ : Optional[Any] = v[:, :, 0]
else:
# bias
A_ : Union[str, Any] = v
else:
# qkv matrices
A_ : Optional[int] = v.shape[0]
A_ : str = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
A_ : int = download(lowerCamelCase__ )
A_ : Any = MODELS_MAP[model_name]["""sample_rate"""]
A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""]
A_ : Tuple = Object()
A_ : Union[str, Any] = sample_size
A_ : Tuple = sample_rate
A_ : int = 0
A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ )
A_ : Optional[Any] = diffusers_model.state_dict()
A_ : Dict = DiffusionUncond(lowerCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] )
A_ : Any = orig_model.diffusion_ema.eval()
A_ : Any = orig_model.state_dict()
A_ : List[str] = rename_orig_weights(lowerCamelCase__ )
A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
A_ : str = value.squeeze()
A_ : Union[str, Any] = value
diffusers_model.load_state_dict(lowerCamelCase__ )
A_ : Optional[Any] = 1_00
A_ : Union[str, Any] = 33
A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ )
A_ : List[str] = torch.manual_seed(lowerCamelCase__ )
A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ )
A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1]
A_ : List[Any] = get_crash_schedule(lowerCamelCase__ )
A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A_ : str = torch.manual_seed(33 )
A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios
A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} )
A_ : str = generated.clamp(-1 , 1 )
A_ : List[Any] = (generated - audio).abs().sum()
A_ : int = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase__ )
print("""Diff max""" , lowerCamelCase__ )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
    main(args)
| 686 |
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = []
A_ : int = set({"""(""", """[""", """{"""} )
A_ : Union[str, Any] = set({""")""", """]""", """}"""} )
A_ : Tuple = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(lowerCamelCase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowerCamelCase__ ) == 0 or (len(lowerCamelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowerCamelCase__ ) == 0
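# Quick sanity checks (added; `is_balanced` mirrors the name used in main()):
#   is_balanced("([]{})")  -> True   (every opener is closed in matching order)
#   is_balanced("([)]")    -> False  (the pairs interleave)
#   is_balanced("((")      -> False  (unclosed openers remain on the stack)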
def a ( ):
'''simple docstring'''
A_ : int = input("""Enter sequence of brackets: """ )
if is_balanced(lowerCamelCase__ ):
print(lowerCamelCase__ , """is balanced""" )
else:
print(lowerCamelCase__ , """is not balanced""" )
if __name__ == "__main__":
    main()
| 686 | 1 |
'''simple docstring'''
import random
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : int = a[left_index]
A_ : int = left_index + 1
for j in range(left_index + 1 , lowerCamelCase__ ):
if a[j] < pivot:
A_, A_ : Tuple = a[i], a[j]
i += 1
A_, A_ : Any = a[i - 1], a[left_index]
return i - 1
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if left < right:
A_ : str = random.randint(lowerCamelCase__ , right - 1 )
A_, A_ : Union[str, Any] = (
a[left],
a[pivot],
        ) # switches the pivot with the leftmost bound
A_ : Tuple = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
quick_sort_random(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowerCamelCase__ , pivot_index + 1 , lowerCamelCase__ ) # recursive quicksort to the right of the pivot point
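# Demo (added; `quick_sort_random` mirrors the name used in main() below, and
# the right bound is exclusive, matching partition()'s range):
#
#   data = [3, 1, 4, 1, 5, 9, 2, 6]
#   quick_sort_random(data, 0, len(data))
#   assert data == [1, 1, 2, 3, 4, 5, 6, 9]
#
# Randomising the pivot makes the O(n^2) worst case unlikely, giving expected
# O(n log n) comparisons.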
def a ( ):
'''simple docstring'''
A_ : List[Any] = input("""Enter numbers separated by a comma:\n""" ).strip()
A_ : List[Any] = [int(lowerCamelCase__ ) for item in user_input.split(""",""" )]
quick_sort_random(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) )
print(lowerCamelCase__ )
if __name__ == "__main__":
    main()
| 686 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, field=None, num_proc=None, **kwargs, ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset, path_or_buf, batch_size=None, num_proc=None, **to_json_kwargs, ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'num_proc {num_proc} must be an integer > 0.')
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def write(self):
        _ = self.to_json_kwargs.pop("""path_or_buf""", None)
        orient = self.to_json_kwargs.pop("""orient""", """records""")
        lines = self.to_json_kwargs.pop("""lines""", True if orient == """records""" else False)
        index = self.to_json_kwargs.pop("""index""", False if orient in ["""split""", """table"""] else True)
        compression = self.to_json_kwargs.pop("""compression""", None)
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'`datasets` currently does not support {compression} compression')
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, """wb""", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    """ was passed. Please provide a local path instead.""")
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith("""\n"""):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj, orient, lines, index, **to_json_kwargs, ):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit="""ba""", disable=not logging.is_progress_bar_enabled(), desc="""Creating json from Arrow format""", ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_bytes)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_bytes in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="""ba""", disable=not logging.is_progress_bar_enabled(), desc="""Creating json from Arrow format""", ):
                    written += file_obj.write(json_bytes)
        return written | 686 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )

    def get_config(self):
        # use_auxiliary_head / auxiliary_concat_input are set to the UperNetConfig defaults (True / False)
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
return
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason="""UperNet does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason="""UperNet does not support input and output embeddings""" )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason="""UperNet does not have a base model""" )
    def test_save_load_fast_init_to_base(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small(self):
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f'Parameter {name} of model {model_class} seems not properly initialized', )
@unittest.skip(reason="""UperNet does not have tied weights""" )
    def test_tied_model_weights_key_ignore(self):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id="""hf-internal-testing/fixtures_ade20k""", repo_type="""dataset""", filename="""ADE_val_00000001.jpg""")
    image = Image.open(filepath).convert("""RGB""")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""")
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="""pt""").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""")
        model = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="""pt""").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1E-4)) | 686 |
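# Illustration (not part of the original file): the integration tests above boil
# down to this standalone inference sketch (checkpoint name taken from the tests):
#
#     processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#     model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#     inputs = processor(images=prepare_img(), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits  # shape (1, num_labels, 512, 512)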
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
blip_test = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test)
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test)
        EXPECTED_BERT_MAPPING = {"""BertModelTest""": """BertModelTester"""}
        EXPECTED_BLIP_MAPPING = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test)
        blip_model_test_mapping = get_model_to_test_mapping(blip_test)
        EXPECTED_BERT_MAPPING = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
        EXPECTED_BLIP_MAPPING = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test)
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test)
        EXPECTED_BERT_MAPPING = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
        EXPECTED_BLIP_MAPPING = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING) | 686 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_x_clip''': [
'''XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XCLIPConfig''',
'''XCLIPTextConfig''',
'''XCLIPVisionConfig''',
],
'''processing_x_clip''': ['''XCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_x_clip"] = [
'''XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XCLIPModel''',
'''XCLIPPreTrainedModel''',
'''XCLIPTextModel''',
'''XCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 |
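# Illustration (not part of the original file): with the `_LazyModule` pattern
# above, the heavy submodules are only imported on first attribute access, e.g.:
#
#     from transformers.models.x_clip import XCLIPProcessor  # loads processing_x_clip only now
#     # modeling_x_clip is not imported until XCLIPModel (or similar) is first accessed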
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longt5"] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_longt5"] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 1_6
EVAL_BATCH_SIZE = 3_2
def get_dataloaders(accelerator, batch_size=16):
    '''simple docstring'''
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    '''simple docstring'''
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
        config["""num_epochs"""] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            """Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    metric = evaluate.load("""glue""", """mrpc""")
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=1_00, num_training_steps=(len(train_dataloader) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""", type=str, default=None, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""", )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""", type=int, default=1, help="""The number of minibatches to be ran before gradients are accumulated.""", )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
    main() | 686 |
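# Illustration (not part of the original file): conceptually, the
# `accelerator.accumulate(model)` context used above behaves like the manual
# pattern below (a sketch; the real implementation also manages gradient syncing):
#
#     for step, batch in enumerate(train_dataloader):
#         loss = model(**batch).loss / gradient_accumulation_steps
#         loss.backward()
#         if (step + 1) % gradient_accumulation_steps == 0:
#             optimizer.step()
#             lr_scheduler.step()
#             optimizer.zero_grad()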
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('''The built release does not contain the custom files. Fix this before going further!''') | 686 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, """tf""", 12, **model_kwargs)
@require_torch
@slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, """pt""", 12, **model_kwargs)
@require_torch
@slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""") as vocab_file:
            vocab_file.write("""\n""".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, """pt""", 12, tokenizer)
@require_tf
@slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, """tf""", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""")
@require_torch
@slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, """pt""", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("""model.onnx""")
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)
@require_torch
@require_tokenizers
@slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random"""))
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""")
        self._test_infer_dynamic_axis(model, tokenizer, """pt""")
@require_tf
@require_tokenizers
@slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random"""))
        tokenizer = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""")
        self._test_infer_dynamic_axis(model, tokenizer, """tf""")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: """batch""", 1: """sequence"""})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["""output_0"""], {0: """batch""", 1: """sequence"""})
        self.assertDictEqual(shapes["""output_1"""], {0: """batch"""})
    def test_ensure_valid_input(self):
        input_names = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        tokens = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["""input_ids"""])
        self.assertEqual(ordered_input_names[0], """input_ids""")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("""/home/something/my_fake_model.onnx"""), """-test""")
        self.assertEqual("""/home/something/my_fake_model-test.onnx""", generated.as_posix()) | 686 |
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate(days, absent, late):
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days=30):
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
    print(solution()) | 686 | 1 |
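# Illustration (not part of the original file): the hand-rolled `cache` dict is
# equivalent to memoizing with the standard library (a sketch):
#
#     from functools import lru_cache
#
#     @lru_cache(maxsize=None)
#     def _calculate(days: int, absent: int, late: int) -> int:
#         ...  # same recursion as above, without the explicit cache bookkeeping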
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = """lower newer"""
        bpe_tokens = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("""Hello world!""", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("""Hello world! cécé herlolip 418""", add_special_tokens=True), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], )
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""")
        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_2 = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            """sequence builders""", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            """sequence builders""", """multi-sequence build""", add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()
        sequence = """Encode this sequence."""
        space_encoding = tokenizer.byte_encoder[""" """.encode("""utf-8""")[0]]
        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)
        tokenizer.add_special_tokens({"""bos_token""": """<s>"""})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)
        # Testing spaces after special tokens
        mask = """<mask>"""
        tokenizer.add_special_tokens(
            {"""mask_token""": AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)
        sequence = """Encode <mask> sequence"""
        sequence_nospace = """Encode <mask>sequence"""
        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)
        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = """A, <mask> AllenNLP sentence."""
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["""token_type_ids"""]), sum(tokens_p["""token_type_ids"""]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]), sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""])
                # Rust correctly handles the space before the mask while Python doesn't
                self.assertSequenceEqual(tokens_p["""input_ids"""], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["""input_ids"""], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
                self.assertSequenceEqual(
                    tokens_r_str, ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)
            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
            self.assertEqual(pre_tokenizer_state["""add_prefix_space"""], add_prefix_space)
            self.assertEqual(post_processor_state["""add_prefix_space"""], add_prefix_space)
            self.assertEqual(post_processor_state["""trim_offsets"""], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), ) | 686 |
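# Illustration (not part of the original file): the offset-mapping behaviour
# pinned down above can be observed directly (a sketch):
#
#     tok = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
#     enc = tok("hello world", return_offsets_mapping=True, add_special_tokens=False)
#     print(enc["offset_mapping"])  # [(0, 5), (6, 11)] with the default trim_offsets=True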
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer, last_epoch=-1):
    '''simple docstring'''
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    '''simple docstring'''
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(""",""")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(""":""")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    '''simple docstring'''
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    '''simple docstring'''
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    '''simple docstring'''
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1E-7, power=1.0, last_epoch=-1):
    '''simple docstring'''
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})')

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name, optimizer, step_rules=None, num_warmup_steps=None, num_training_steps=None, num_cycles=1, power=1.0, last_epoch=-1, ):
    '''simple docstring'''
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.')
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.')
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch) | 686 | 1 |
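# Illustration (not part of the original file): a minimal sketch of driving one
# of the schedules above and inspecting the learning rates it produces:
#
#     import torch
#
#     model = torch.nn.Linear(2, 2)
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#     lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
#     for _ in range(3):
#         optimizer.step()
#         lr_scheduler.step()
#         print(lr_scheduler.get_last_lr())  # warms up linearly toward 1e-3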
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["""next_sentence_label"""] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
def _a (self ):
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A_ : str = None
if self.use_input_mask:
A_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A_ : Any = None
A_ : Union[str, Any] = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A_ : int = ids_tensor([self.batch_size] , self.num_choices )
A_ : str = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Tuple = TFMobileBertModel(config=lowercase )
A_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Optional[Any] = model(lowercase )
A_ : Tuple = [input_ids, input_mask]
A_ : Optional[Any] = model(lowercase )
A_ : str = model(lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : List[str] = TFMobileBertForMaskedLM(config=lowercase )
A_ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Any = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Optional[Any] = TFMobileBertForNextSentencePrediction(config=lowercase )
A_ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Optional[Any] = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Tuple = TFMobileBertForPreTraining(config=lowercase )
A_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Union[str, Any] = model(lowercase )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : List[Any] = self.num_labels
A_ : Tuple = TFMobileBertForSequenceClassification(config=lowercase )
A_ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Optional[Any] = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Any = self.num_choices
A_ : Dict = TFMobileBertForMultipleChoice(config=lowercase )
A_ : int = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
A_ : str = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
A_ : Optional[int] = tf.tile(tf.expand_dims(lowercase , 1 ) , (1, self.num_choices, 1) )
A_ : List[str] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
A_ : Any = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Dict = self.num_labels
A_ : Tuple = TFMobileBertForTokenClassification(config=lowercase )
A_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Union[str, Any] = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a (self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
A_ : Dict = TFMobileBertForQuestionAnswering(config=lowercase )
A_ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : List[str] = model(lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
def _a (self ):
A_ : Optional[Any] = TFMobileBertModelTest.TFMobileBertModelTester(self )
A_ : Optional[int] = ConfigTester(self , config_class=lowercase , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowercase )
def _a (self ):
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowercase )
def _a (self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowercase )
def _a (self ):
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowercase )
def _a (self ):
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowercase )
def _a (self ):
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowercase )
def _a (self ):
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowercase )
@slow
def _a (self ):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
A_ : List[str] = TFMobileBertModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def _a (self ):
A_ : List[Any] = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" )
A_ : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
A_ : List[Any] = model(lowercase )[0]
A_ : List[Any] = [1, 6, 30522]
self.assertEqual(output.shape , lowercase )
A_ : List[str] = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-4 )
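# These tests follow the standard transformers test layout; a typical local run
# would be (a sketch -- the file path is the conventional location, not stated here):
#
#   python -m pytest tests/models/mobilebert/test_modeling_tf_mobilebert.py -v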
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')
lowerCamelCase :int = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
lowerCamelCase :List[str] = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
lowerCamelCase :Union[str, Any] = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
lowerCamelCase :Dict = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
lowerCamelCase :int = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
lowerCamelCase :str = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
lowerCamelCase :List[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
lowerCamelCase :Tuple = []
lowerCamelCase :Dict = []
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[Any] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : Any = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[int] = value
elif weight_type == "weight_g":
A_ : Optional[int] = value
elif weight_type == "weight_v":
A_ : Dict = value
elif weight_type == "bias":
A_ : Dict = value
elif weight_type == "running_mean":
A_ : Optional[Any] = value
elif weight_type == "running_var":
A_ : int = value
elif weight_type == "num_batches_tracked":
A_ : Optional[Any] = value
elif weight_type == "weight_ih_l0":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l0":
A_ : Union[str, Any] = value
elif weight_type == "bias_ih_l0":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l0":
A_ : Tuple = value
elif weight_type == "weight_ih_l1":
A_ : Optional[int] = value
elif weight_type == "weight_hh_l1":
A_ : Dict = value
elif weight_type == "bias_ih_l1":
A_ : Optional[int] = value
elif weight_type == "bias_hh_l1":
A_ : Tuple = value
else:
A_ : Any = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix, suffix = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
A_ : List[str] = MAPPING_24K
elif model_name == "encodec_48khz":
A_ : str = MAPPING_48K
else:
raise ValueError(f'Unsupported model: {model_name}' )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase__ , lowerCamelCase__ ):
logger.info(f'{name} was ignored' )
continue
A_ : str = False
for key, mapped_key in MAPPING.items():
if "*" in key:
                prefix, suffix = key.split(""".*.""" )
if prefix in name and suffix in name:
A_ : Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
continue
A_ : Union[str, Any] = True
if "*" in mapped_key:
A_ : int = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase__ )
if "weight_g" in name:
A_ : Any = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "weight_ih_l0" in name:
A_ : Union[str, Any] = """weight_ih_l0"""
elif "weight_hh_l0" in name:
A_ : Tuple = """weight_hh_l0"""
elif "bias_ih_l0" in name:
A_ : str = """bias_ih_l0"""
elif "bias_hh_l0" in name:
A_ : List[Any] = """bias_hh_l0"""
elif "weight_ih_l1" in name:
A_ : Dict = """weight_ih_l1"""
elif "weight_hh_l1" in name:
A_ : Any = """weight_hh_l1"""
elif "bias_ih_l1" in name:
A_ : Optional[int] = """bias_ih_l1"""
elif "bias_hh_l1" in name:
A_ : List[Any] = """bias_hh_l1"""
elif "bias" in name:
A_ : List[str] = """bias"""
elif "weight" in name:
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : Union[str, Any] = """running_mean"""
elif "running_var" in name:
A_ : Optional[int] = """running_var"""
elif "num_batches_tracked" in name:
A_ : List[Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
    )
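# Example invocation (a sketch: the checkpoint filename matches the download
# comment at the top of this script, while the script name and output path are
# placeholders):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz-converted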
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase :Tuple = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = 'blip_2_vision_model'
def __init__(self , lowercase=1408 , lowercase=6144 , lowercase=39 , lowercase=16 , lowercase=224 , lowercase=14 , lowercase="gelu" , lowercase=0.0_00_01 , lowercase=0.0 , lowercase=1E-10 , lowercase=True , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = hidden_size
A_ : Dict = intermediate_size
A_ : List[Any] = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : int = patch_size
A_ : List[str] = image_size
A_ : Dict = initializer_range
A_ : Optional[Any] = attention_dropout
A_ : str = layer_norm_eps
A_ : List[Any] = hidden_act
A_ : Union[str, Any] = qkv_bias
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : Union[str, Any] = cls.get_config_dict(lowercase , **lowercase )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
A_ : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = 'blip_2_qformer'
def __init__(self , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase="absolute" , lowercase=2 , lowercase=1408 , **lowercase , ):
super().__init__(pad_token_id=lowercase , **lowercase )
A_ : List[str] = vocab_size
A_ : Tuple = hidden_size
A_ : Tuple = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Optional[int] = hidden_act
A_ : List[str] = intermediate_size
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : str = max_position_embeddings
A_ : Optional[int] = initializer_range
A_ : List[Any] = layer_norm_eps
A_ : Dict = position_embedding_type
A_ : Dict = cross_attention_frequency
A_ : Tuple = encoder_hidden_size
@classmethod
def _a (cls , lowercase , **lowercase ):
cls._set_token_in_kwargs(lowercase )
A_, A_ : List[Any] = cls.get_config_dict(lowercase , **lowercase )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
A_ : Optional[int] = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(lowercase , **lowercase )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = 'blip-2'
__SCREAMING_SNAKE_CASE : int = True
def __init__(self , lowercase=None , lowercase=None , lowercase=None , lowercase=32 , **lowercase ):
super().__init__(**lowercase )
if vision_config is None:
A_ : Optional[Any] = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
if qformer_config is None:
A_ : Dict = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
if text_config is None:
A_ : Dict = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
A_ : str = BlipaVisionConfig(**lowercase )
A_ : int = BlipaQFormerConfig(**lowercase )
A_ : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
A_ : Tuple = CONFIG_MAPPING[text_model_type](**lowercase )
A_ : Dict = self.text_config.tie_word_embeddings
A_ : List[Any] = self.text_config.is_encoder_decoder
A_ : Tuple = num_query_tokens
A_ : List[str] = self.vision_config.hidden_size
A_ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
A_ : Optional[int] = 1.0
A_ : Union[str, Any] = 0.02
@classmethod
def _a (cls , lowercase , lowercase , lowercase , **lowercase , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowercase , )
def _a (self ):
A_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
A_ : Union[str, Any] = self.vision_config.to_dict()
A_ : Dict = self.qformer_config.to_dict()
A_ : Tuple = self.text_config.to_dict()
A_ : Tuple = self.__class__.model_type
        return output
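# Usage sketch for the composite config defined above (assumption: the three
# classes correspond to Blip2VisionConfig, Blip2QFormerConfig and Blip2Config
# from transformers, whose names are obfuscated in this dump):
#
#   from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )
#   assert config.num_query_tokens == 32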
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'beit'
def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = initializer_range
A_ : str = layer_norm_eps
A_ : Any = image_size
A_ : int = patch_size
A_ : List[str] = num_channels
A_ : Any = use_mask_token
A_ : Dict = use_absolute_position_embeddings
A_ : List[Any] = use_relative_position_bias
A_ : Tuple = use_shared_relative_position_bias
A_ : Optional[int] = layer_scale_init_value
A_ : Tuple = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Optional[int] = use_auxiliary_head
A_ : Union[str, Any] = auxiliary_loss_weight
A_ : Tuple = auxiliary_channels
A_ : List[Any] = auxiliary_num_convs
A_ : Dict = auxiliary_concat_input
A_ : Optional[Any] = semantic_loss_ignore_index
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
        return 1E-4
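# Usage sketch (assumption: the two classes above correspond to BeitConfig and
# its ONNX export config in transformers):
#
#   from transformers import BeitConfig
#   config = BeitConfig(image_size=224, patch_size=16, use_relative_position_bias=True)
#   assert config.model_type == "beit"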
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowerCamelCase :List[Any] = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def _a (cls ):
A_ : Any = TOKEN
HfFolder.save_token(lowercase )
@classmethod
def _a (cls ):
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def _a (self ):
A_ : List[Any] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ : List[str] = FlaxBertModel(lowercase )
model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
A_ : List[str] = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
A_ : Dict = flatten_dict(unfreeze(model.params ) )
A_ : Dict = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' )
# Reset repo
delete_repo(token=self._token , repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowercase , repo_id="""test-model-flax""" , push_to_hub=lowercase , use_auth_token=self._token )
A_ : List[Any] = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
A_ : str = flatten_dict(unfreeze(model.params ) )
A_ : Any = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ : str = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' )
def _a (self ):
A_ : Tuple = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
A_ : int = FlaxBertModel(lowercase )
model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
A_ : Dict = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A_ : str = flatten_dict(unfreeze(model.params ) )
A_ : str = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ : List[Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowercase , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=lowercase , use_auth_token=self._token )
A_ : Union[str, Any] = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A_ : List[str] = flatten_dict(unfreeze(model.params ) )
A_ : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A_ : Optional[int] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowercase , 1E-3 , msg=F'{key} not identical' )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = True
A_ : Any = flatten_dict(modela.params )
A_ : Any = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4:
A_ : int = False
return models_are_equal
@require_flax
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A_ : Any = FlaxBertModel(lowercase )
A_ : List[str] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase , lowercase ) )
with self.assertRaises(lowercase ):
A_ : Dict = FlaxBertModel.from_pretrained(lowercase )
A_ : Tuple = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase )
self.assertTrue(check_models_equal(lowercase , lowercase ) )
def _a (self ):
A_ : List[str] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A_ : Tuple = FlaxBertModel(lowercase )
A_ : Optional[Any] = """bert"""
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowercase , lowercase ) , max_shard_size="""10KB""" )
with self.assertRaises(lowercase ):
A_ : Optional[int] = FlaxBertModel.from_pretrained(lowercase )
A_ : Optional[Any] = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase )
self.assertTrue(check_models_equal(lowercase , lowercase ) )
def _a (self ):
A_ : Tuple = """bert"""
A_ : Union[str, Any] = """hf-internal-testing/tiny-random-bert-subfolder"""
with self.assertRaises(lowercase ):
A_ : Dict = FlaxBertModel.from_pretrained(lowercase )
A_ : Union[str, Any] = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase )
self.assertIsNotNone(lowercase )
def _a (self ):
A_ : Union[str, Any] = """bert"""
A_ : Union[str, Any] = """hf-internal-testing/tiny-random-bert-sharded-subfolder"""
with self.assertRaises(lowercase ):
A_ : List[Any] = FlaxBertModel.from_pretrained(lowercase )
A_ : Tuple = FlaxBertModel.from_pretrained(lowercase , subfolder=lowercase )
        self.assertIsNotNone(lowercase )
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase :Optional[int] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
    return torch.atan2(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2
A_ : List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ )
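# Sanity notes for the two helpers above (assumption: they are alpha_sigma_to_t
# and get_crash_schedule from the original script). The first computes
# t = atan2(sigma, alpha) * 2 / pi; the second warps a linear t through
# sigma = sin(t * pi / 2) ** 2 and alpha = sqrt(1 - sigma ** 2), so that
#   t = 0   -> 0
#   t = 0.5 -> asin(0.5) * 2 / pi = 1 / 3
#   t = 1   -> 1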
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase ):
super().__init__()
A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 )
A_ : str = deepcopy(self.diffusion )
A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = MODELS_MAP[model_name]["""url"""]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase :str = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase :int = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase :List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase :Optional[Any] = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return name.replace(lowerCamelCase__ , lowerCamelCase__ )
elif name.startswith(lowerCamelCase__ ):
return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( lowerCamelCase__ , lowerCamelCase__=13 ):
'''simple docstring'''
A_ : Union[str, Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
A_ : Dict = 0
if string.startswith("""net.3.""" ):
depth += 1
A_ : int = string[6:]
elif string.startswith("""net.""" ):
A_ : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
A_ : Dict = string[7:]
if string.startswith("""main.""" ):
A_ : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
A_ : Optional[Any] = string[:2]
A_ : Optional[Any] = string[2:]
else:
A_ : List[Any] = string[0]
A_ : Dict = string[1:]
if depth == max_depth:
A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num]
A_ : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowerCamelCase__ ) < 7:
A_ : Any = DOWN_NUM_TO_LAYER[layer_num]
A_ : Union[str, Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCamelCase__ ) > 7:
A_ : List[str] = UP_NUM_TO_LAYER[layer_num]
A_ : List[str] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
A_ : str = DEPTH_0_TO_LAYER[layer_num]
A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
A_ : Optional[int] = string_left[1:]
if "resnets" in new_layer:
A_ : Tuple = convert_resconv_naming(lowerCamelCase__ )
elif "attentions" in new_layer:
A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ )
A_ : Dict = new_string_left
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
A_ : List[Any] = rename(lowerCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = v
return new_state_dict
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if len(lowerCamelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
A_ : Optional[Any] = v[:, :, 0]
else:
# bias
A_ : Union[str, Any] = v
else:
# qkv matrices
A_ : Optional[int] = v.shape[0]
A_ : str = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
A_ : int = download(lowerCamelCase__ )
A_ : Any = MODELS_MAP[model_name]["""sample_rate"""]
A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""]
A_ : Tuple = Object()
A_ : Union[str, Any] = sample_size
A_ : Tuple = sample_rate
A_ : int = 0
A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ )
A_ : Optional[Any] = diffusers_model.state_dict()
A_ : Dict = DiffusionUncond(lowerCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] )
A_ : Any = orig_model.diffusion_ema.eval()
A_ : Any = orig_model.state_dict()
A_ : List[str] = rename_orig_weights(lowerCamelCase__ )
A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
A_ : str = value.squeeze()
A_ : Union[str, Any] = value
diffusers_model.load_state_dict(lowerCamelCase__ )
A_ : Optional[Any] = 1_00
A_ : Union[str, Any] = 33
A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ )
A_ : List[str] = torch.manual_seed(lowerCamelCase__ )
A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ )
A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1]
A_ : List[Any] = get_crash_schedule(lowerCamelCase__ )
A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A_ : str = torch.manual_seed(33 )
A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios
A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} )
A_ : str = generated.clamp(-1 , 1 )
A_ : List[Any] = (generated - audio).abs().sum()
A_ : int = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase__ )
print("""Diff max""" , lowerCamelCase__ )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
    main(args)
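# Example invocation (a sketch: per MODELS_MAP above, passing an official model
# name as --model_path triggers a download; the script name and output path are
# placeholders):
#
#   python convert_dance_diffusion_to_diffusers.py \
#       --model_path gwf-440k \
#       --checkpoint_path ./gwf-440k-diffusers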
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase :Tuple = {'''configuration_focalnet''': ['''FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FocalNetConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :List[str] = [
'''FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FocalNetForImageClassification''',
'''FocalNetForMaskedImageModeling''',
'''FocalNetBackbone''',
'''FocalNetModel''',
'''FocalNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    lowerCamelCase :Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
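# Usage sketch (assumption: this file is the focalnet package __init__, so the
# lazy module defers the torch-backed imports until first attribute access):
#
#   from transformers import FocalNetConfig   # cheap, configuration only
#   from transformers import FocalNetModel    # pulls in torch lazily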
'''simple docstring'''
from math import factorial
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if successes > trials:
raise ValueError("""successes must be lower or equal to trials""" )
if trials < 0 or successes < 0:
raise ValueError("""the function is defined for non-negative integers""" )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError("""the function is defined for non-negative integers""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
A_ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
A_ : Union[str, Any] = float(factorial(lowerCamelCase__ ) )
coefficient /= factorial(lowerCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
    print(binomial_distribution(2, 4, 0.75))
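# Worked check for the demo above: P(X = 2) with n = 4 trials and p = 0.75 is
#   C(4, 2) * 0.75 ** 2 * 0.25 ** 2 = 6 * 0.5625 * 0.0625 = 0.2109375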
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
lowerCamelCase :Dict = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[Any] = 'roformer'
def __init__(self , lowercase=50000 , lowercase=None , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=1536 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase=False , lowercase=True , **lowercase , ):
super().__init__(pad_token_id=lowercase , **lowercase )
A_ : Optional[int] = vocab_size
A_ : List[Any] = hidden_size if embedding_size is None else embedding_size
A_ : Tuple = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Optional[int] = hidden_act
A_ : Any = intermediate_size
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[int] = type_vocab_size
A_ : int = initializer_range
A_ : List[Any] = layer_norm_eps
A_ : Tuple = rotary_value
A_ : Union[str, Any] = use_cache
class _lowerCAmelCase ( __UpperCAmelCase ):
@property
def _a (self ):
if self.task == "multiple-choice":
A_ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : str = {0: """batch""", 1: """sequence"""}
A_ : Optional[int] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
            ] )
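# Usage sketch (assumption: the classes above correspond to RoFormerConfig and
# RoFormerOnnxConfig from transformers):
#
#   from transformers import RoFormerConfig
#   config = RoFormerConfig(rotary_value=True)
#   assert config.model_type == "roformer"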
'''simple docstring'''
import re
def a ( lowerCamelCase__ ):
'''simple docstring'''
if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ):
raise ValueError("""Invalid Strand""" )
return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
    doctest.testmod()
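# Self-contained check of the complementation rule used above (a sketch that
# sidesteps the obfuscated function name in this dump):
if __name__ == "__main__":
    table = str.maketrans("ATCG", "TAGC")
    assert "ATCG".translate(table) == "TAGC"
    assert "AATT".translate(table) == "TTAA"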
'''simple docstring'''
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def a ( lowerCamelCase__ , lowerCamelCase__=0 ):
'''simple docstring'''
return sorted(lowerCamelCase__ , key=lambda lowerCamelCase__ : x[column] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=float("""inf""" ) ):
'''simple docstring'''
for i in range(points_counts - 1 ):
for j in range(i + 1 , lowerCamelCase__ ):
A_ : Union[str, Any] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
A_ : Tuple = current_dis
return min_dis
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=float("""inf""" ) ):
'''simple docstring'''
for i in range(min(6 , points_counts - 1 ) , lowerCamelCase__ ):
for j in range(max(0 , i - 6 ) , lowerCamelCase__ ):
A_ : Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
A_ : Any = current_dis
return min_dis
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if points_counts <= 3:
return dis_between_closest_pair(lowerCamelCase__ , lowerCamelCase__ )
# recursion
A_ : List[str] = points_counts // 2
A_ : str = closest_pair_of_points_sqr(
lowerCamelCase__ , points_sorted_on_y[:mid] , lowerCamelCase__ )
A_ : Any = closest_pair_of_points_sqr(
lowerCamelCase__ , points_sorted_on_y[mid:] , points_counts - mid )
A_ : str = min(lowerCamelCase__ , lowerCamelCase__ )
A_ : Any = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(lowerCamelCase__ )
A_ : int = dis_between_closest_in_strip(
lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
return min(lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : int = column_based_sort(lowerCamelCase__ , column=0 )
A_ : str = column_based_sort(lowerCamelCase__ , column=1 )
return (
closest_pair_of_points_sqr(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
) ** 0.5
if __name__ == "__main__":
lowerCamelCase :Tuple = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
    print('''Distance:''', closest_pair_of_points(points, len(points)))
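# Worked check: on the demo points the closest pair is (2, 3) and (3, 4), with
# squared distance 1 + 1 = 2, so the printed distance is sqrt(2), about 1.4142.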
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(lowerCamelCase__ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowerCamelCase__ ):
            http_head("""https://huggingface.co""" )
'''simple docstring'''
from torch import nn
def a ( lowerCamelCase__ ):
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
        raise ValueError(f'Unsupported activation function: {act_fn}' )
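# Self-contained sketch of the same dispatch idea (torch's nn module, imported
# above, provides all three activation classes):
if __name__ == "__main__":
    import torch
    act = nn.SiLU()
    assert float(act(torch.zeros(1))) == 0.0  # SiLU(0) = 0 * sigmoid(0) = 0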
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase :Any = re.compile(R'''\s+''')
def a ( lowerCamelCase__ ):
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def a ( lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
A_ : Optional[int] = example["""content"""].splitlines()
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ):
'''simple docstring'''
A_ : Any = ["""unit tests""", """test file""", """configuration file"""]
A_ : List[str] = example["""content"""].splitlines()
A_ : str = 0
A_ : Union[str, Any] = 0
# first test
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : List[Any] = example["""content"""].count("""\n""" )
A_ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = ["""def """, """class """, """for """, """while """]
A_ : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a ( lowerCamelCase__ , lowerCamelCase__=4 ):
'''simple docstring'''
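    # Count '=' characters as a rough proxy for assignment statements; files with at most `minimum` of them are flagged.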
A_ : Tuple = example["""content"""].splitlines()
A_ : int = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a ( lowerCamelCase__ ):
'''simple docstring'''
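    # Characters per tokenizer token; a low ratio means the content tokenizes poorly (e.g. random or encoded strings).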
A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""]
A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ )
return {"ratio": ratio}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = {}
results.update(get_hash(lowerCamelCase__ ) )
results.update(line_stats(lowerCamelCase__ ) )
results.update(alpha_stats(lowerCamelCase__ ) )
results.update(char_token_ratio(lowerCamelCase__ ) )
results.update(is_autogenerated(lowerCamelCase__ ) )
results.update(is_config_or_test(lowerCamelCase__ ) )
results.update(has_no_keywords(lowerCamelCase__ ) )
results.update(has_few_assignments(lowerCamelCase__ ) )
return results
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
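    # Gzip-compress the JSON shard at compression level 6, then remove the uncompressed original.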
with open(lowerCamelCase__ , """rb""" ) as f_in:
with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
os.unlink(lowerCamelCase__ )
# Settings
lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase :Tuple = parser.parse_args()
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCamelCase :List[Any] = time.time()
lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
lowerCamelCase :int = time.time()
lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
lowerCamelCase :int = set(ds.unique('''hash'''))
lowerCamelCase :List[str] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCamelCase :List[str] = time.time()
lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
lowerCamelCase :int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowerCamelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowerCamelCase :Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json")
lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = 3_84
A_ : Union[str, Any] = 7
if "tiny" in model_name:
A_ : Optional[int] = 96
A_ : Optional[Any] = (2, 2, 6, 2)
A_ : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
A_ : int = 96
A_ : Optional[int] = (2, 2, 18, 2)
A_ : Optional[int] = (3, 6, 12, 24)
elif "base" in model_name:
A_ : List[Any] = 1_28
A_ : List[Any] = (2, 2, 18, 2)
A_ : Optional[Any] = (4, 8, 16, 32)
A_ : List[Any] = 12
A_ : int = 5_12
elif "large" in model_name:
A_ : List[Any] = 1_92
A_ : Union[str, Any] = (2, 2, 18, 2)
A_ : List[Any] = (6, 12, 24, 48)
A_ : int = 12
A_ : Any = 7_68
# set label information
A_ : str = 1_50
A_ : Union[str, Any] = """huggingface/label-files"""
A_ : Optional[int] = """ade20k-id2label.json"""
A_ : str = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
    A_ : Dict = {int(lowerCamelCase__ ): v for k, v in id2label.items()}
    A_ : str = {v: k for k, v in id2label.items()}
A_ : Optional[int] = SwinConfig(
embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , num_heads=lowerCamelCase__ , window_size=lowerCamelCase__ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
A_ : Any = UperNetConfig(
        backbone_config=lowerCamelCase__ , auxiliary_in_channels=lowerCamelCase__ , num_labels=lowerCamelCase__ , id2label=lowerCamelCase__ , label2id=lowerCamelCase__ , )
return config
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index', f'backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias', f'backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.weight', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.norm2.bias', f'backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias', f'backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias', f'backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.stages.{i}.downsample.reduction.weight', f'backbone.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.weight', f'backbone.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.stages.{i}.downsample.norm.bias', f'backbone.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = dct.pop(lowerCamelCase__ )
A_ : Union[str, Any] = val
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
A_ : Tuple = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
A_ : Tuple = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight' )
A_ : Optional[Any] = state_dict.pop(f'backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A_ : str = in_proj_weight[:dim, :]
A_ : Optional[int] = in_proj_bias[: dim]
A_ : Dict = in_proj_weight[
dim : dim * 2, :
]
A_ : Optional[Any] = in_proj_bias[
dim : dim * 2
]
A_ : Tuple = in_proj_weight[
-dim :, :
]
A_ : str = in_proj_bias[-dim :]
# fmt: on
def a ( lowerCamelCase__ ):
'''simple docstring'''
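    # Undo the channel reordering of Swin's patch-merging (downsample) reduction weight: regroup
    # the input channels into four blocks and permute the blocks with [0, 2, 1, 3].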
A_, A_ : Union[str, Any] = x.shape
A_ : Dict = x.reshape(lowerCamelCase__ , 4 , in_channel // 4 )
A_ : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase__ , lowerCamelCase__ )
return x
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_, A_ : Optional[Any] = x.shape
A_ : Any = x.reshape(lowerCamelCase__ , in_channel // 4 , 4 )
A_ : Optional[int] = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase__ , lowerCamelCase__ )
return x
def a ( lowerCamelCase__ ):
'''simple docstring'''
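    # Same block permutation as the reduction case, applied to the 1-D patch-merging norm parameters.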
A_ : int = x.shape[0]
A_ : str = x.reshape(4 , in_channel // 4 )
A_ : List[Any] = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase__ )
return x
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = x.shape[0]
A_ : List[Any] = x.reshape(in_channel // 4 , 4 )
A_ : Optional[int] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase__ )
return x
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
A_ : int = model_name_to_url[model_name]
A_ : List[str] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="""cpu""" , file_name=lowerCamelCase__ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase__ , param.shape )
A_ : Union[str, Any] = get_upernet_config(lowerCamelCase__ )
A_ : List[str] = UperNetForSemanticSegmentation(lowerCamelCase__ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A_ : List[Any] = state_dict.pop(lowerCamelCase__ )
if "bn" in key:
A_ : int = key.replace("""bn""" , """batch_norm""" )
A_ : Union[str, Any] = val
# rename keys
A_ : List[str] = create_rename_keys(lowerCamelCase__ )
for src, dest in rename_keys:
rename_key(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
read_in_q_k_v(lowerCamelCase__ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
A_ : List[str] = reverse_correct_unfold_reduction_order(lowerCamelCase__ )
if "norm" in key:
A_ : Tuple = reverse_correct_unfold_norm_order(lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# verify on image
A_ : str = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
A_ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert("""RGB""" )
A_ : List[str] = SegformerImageProcessor()
A_ : Optional[int] = processor(lowerCamelCase__ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
A_ : Union[str, Any] = model(lowerCamelCase__ )
A_ : str = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
A_ : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
A_ : List[str] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
A_ : Union[str, Any] = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
A_ : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase__ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
lowerCamelCase :List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F"upernet-swin-{size}" for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :int = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 686 |
'''simple docstring'''
import pytest
lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''
lowerCamelCase :List[Any] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = dataset_loading_script_name
A_ : int = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase__ )
A_ : Tuple = script_dir / f'{script_name}.py'
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return str(lowerCamelCase__ ) | 686 | 1 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
lowerCamelCase :Optional[List[str]] = None
lowerCamelCase :Any = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowerCamelCase :str = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class _lowerCAmelCase :
__SCREAMING_SNAKE_CASE : bool = True
__SCREAMING_SNAKE_CASE : Optional[str] = None
# Automatically constructed
__SCREAMING_SNAKE_CASE : ClassVar[str] = "PIL.Image.Image"
__SCREAMING_SNAKE_CASE : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
__SCREAMING_SNAKE_CASE : str = field(default='Image' , init=__UpperCAmelCase , repr=__UpperCAmelCase )
def __call__(self ):
return self.pa_type
def _a (self , lowercase ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = np.array(lowercase )
if isinstance(lowercase , lowercase ):
return {"path": value, "bytes": None}
elif isinstance(lowercase , lowercase ):
return {"path": None, "bytes": value}
elif isinstance(lowercase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowercase )
elif isinstance(lowercase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowercase )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
F'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def _a (self , lowercase , lowercase=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
A_ : Union[str, Any] = {}
A_, A_ : Optional[Any] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(F'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(lowercase ):
A_ : Optional[int] = PIL.Image.open(lowercase )
else:
A_ : Optional[int] = path.split("""::""" )[-1]
try:
A_ : Tuple = string_to_dict(lowercase , config.HUB_DATASETS_URL )["""repo_id"""]
A_ : List[str] = token_per_repo_id.get(lowercase )
except ValueError:
A_ : Optional[int] = None
with xopen(lowercase , """rb""" , use_auth_token=lowercase ) as f:
A_ : str = BytesIO(f.read() )
A_ : str = PIL.Image.open(bytes_ )
else:
A_ : List[str] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def _a (self ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def _a (self , lowercase ):
if pa.types.is_string(storage.type ):
A_ : int = pa.array([None] * len(lowercase ) , type=pa.binary() )
A_ : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
A_ : Optional[Any] = pa.array([None] * len(lowercase ) , type=pa.string() )
A_ : List[str] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
A_ : int = storage.field("""bytes""" )
else:
A_ : List[Any] = pa.array([None] * len(lowercase ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
A_ : List[Any] = storage.field("""path""" )
else:
A_ : str = pa.array([None] * len(lowercase ) , type=pa.string() )
A_ : str = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
A_ : List[Any] = pa.array(
[encode_np_array(np.array(lowercase ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
A_ : Dict = pa.array([None] * len(lowercase ) , type=pa.string() )
A_ : Union[str, Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase , self.pa_type )
def _a (self , lowercase ):
@no_op_if_value_is_null
def path_to_bytes(lowercase ):
with xopen(lowercase , """rb""" ) as f:
A_ : int = f.read()
return bytes_
A_ : int = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
A_ : List[str] = pa.array(
[os.path.basename(lowercase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
A_ : List[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase , self.pa_type )
def a ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
A_ : Optional[int] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = BytesIO()
if image.format in list_image_compression_formats():
A_ : List[str] = image.format
else:
A_ : Optional[Any] = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(lowerCamelCase__ , format=lowerCamelCase__ )
return buffer.getvalue()
def a ( lowerCamelCase__ ):
'''simple docstring'''
if hasattr(lowerCamelCase__ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
A_ : List[Any] = array.dtype
A_ : Dict = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
A_ : List[Any] = dtype.kind
A_ : str = dtype.itemsize
A_ : str = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
A_ : Union[str, Any] = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
A_ : Optional[int] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
A_ : Optional[int] = dtype_byteorder + dtype_kind + str(lowerCamelCase__ )
A_ : Dict = np.dtype(lowerCamelCase__ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
A_ : Dict = PIL.Image.fromarray(array.astype(lowerCamelCase__ ) )
return {"path": None, "bytes": image_to_bytes(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
A_, A_ : Union[str, Any] = first_non_null_value(lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCamelCase__ , np.ndarray ):
A_ : Optional[Any] = no_op_if_value_is_null(lowerCamelCase__ )
return [obj_to_image_dict_func(lowerCamelCase__ ) for obj in objs]
elif isinstance(lowerCamelCase__ , PIL.Image.Image ):
A_ : str = no_op_if_value_is_null(lowerCamelCase__ )
return [obj_to_image_dict_func(lowerCamelCase__ ) for obj in objs]
else:
return objs
else:
return objs | 686 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCamelCase :int = datasets.load_iris()
lowerCamelCase :str = np.array(data['''data'''])
lowerCamelCase :Dict = np.array(data['''target'''])
lowerCamelCase :Union[str, Any] = data['''target_names''']
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
A_ : List[str] = []
for data_point in data:
A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=30 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=2 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=3 , lowercase=None , ):
A_ : Union[str, Any] = parent
A_ : Tuple = batch_size
A_ : List[str] = image_size
A_ : Dict = patch_size
A_ : List[Any] = num_channels
A_ : Optional[Any] = is_training
A_ : List[Any] = use_labels
A_ : List[Any] = hidden_size
A_ : Any = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : Dict = hidden_act
A_ : Union[str, Any] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : str = type_sequence_label_size
A_ : List[str] = initializer_range
A_ : List[str] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Optional[Any] = (image_size // patch_size) ** 2
A_ : int = num_patches + 1
def _a (self ):
A_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : List[Any] = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def _a (self ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Any = TFViTModel(config=lowercase )
A_ : Any = model(lowercase , training=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
A_ : List[str] = self.image_size // 2
A_ : str = pixel_values[:, :, :image_size, :image_size]
A_ : str = model(lowercase , interpolate_pos_encoding=lowercase , training=lowercase )
A_ : List[str] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[str] = self.type_sequence_label_size
A_ : Optional[int] = TFViTForImageClassification(lowercase )
A_ : Optional[Any] = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
A_ : Union[str, Any] = self.image_size // 2
A_ : List[Any] = pixel_values[:, :, :image_size, :image_size]
A_ : List[str] = model(lowercase , interpolate_pos_encoding=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : List[str] = 1
A_ : Tuple = TFViTForImageClassification(lowercase )
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Optional[Any] = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a (self ):
A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_, A_, A_ : List[Any] = config_and_inputs
A_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : List[str] = False
def _a (self ):
A_ : List[Any] = TFViTModelTester(self )
A_ : Tuple = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def _a (self ):
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
A_, A_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Any = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , tf.keras.layers.Layer ) )
def _a (self ):
A_, A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Tuple = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[int] = [*signature.parameters.keys()]
A_ : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def _a (self ):
A_ : Union[str, Any] = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(lowercase )
def a ( ):
'''simple docstring'''
A_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def _a (self ):
A_ : Optional[Any] = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
A_ : Any = self.default_image_processor
A_ : Tuple = prepare_img()
A_ : Optional[Any] = image_processor(images=lowercase , return_tensors="""tf""" )
# forward pass
A_ : Dict = model(**lowercase )
# verify the logits
A_ : Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : List[Any] = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 ) | 686 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 1 |
'''simple docstring'''
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = XCLIPTextConfig()
# derive patch size from model name
A_ : Optional[Any] = model_name.find("""patch""" )
A_ : List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
A_ : str = XCLIPVisionConfig(patch_size=lowerCamelCase__ , num_frames=lowerCamelCase__ )
if "large" in model_name:
A_ : List[str] = 7_68
A_ : List[str] = 30_72
A_ : Any = 12
A_ : int = 10_24
A_ : Tuple = 40_96
A_ : Dict = 16
A_ : Union[str, Any] = 24
A_ : Dict = 7_68
A_ : Dict = 30_72
if model_name == "xclip-large-patch14-16-frames":
A_ : Union[str, Any] = 3_36
A_ : Optional[Any] = XCLIPConfig.from_text_vision_configs(lowerCamelCase__ , lowerCamelCase__ )
if "large" in model_name:
A_ : Union[str, Any] = 7_68
return config
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name == "token_embedding.weight":
A_ : Optional[Any] = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
A_ : Optional[Any] = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
A_ : Any = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
A_ : List[Any] = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
A_ : Optional[Any] = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
A_ : List[Any] = name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
A_ : Optional[int] = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
A_ : Optional[int] = name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
A_ : Optional[int] = name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
A_ : List[Any] = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
A_ : Union[str, Any] = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
A_ : List[Any] = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
A_ : int = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
A_ : int = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
A_ : Dict = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
A_ : Union[str, Any] = name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
A_ : Dict = name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
A_ : Dict = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
A_ : Any = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
A_ : str = name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
A_ : Dict = name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
A_ : str = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A_ : List[str] = orig_state_dict.pop(lowerCamelCase__ )
if "attn.in_proj" in key:
A_ : Any = key.split(""".""" )
if key.startswith("""visual""" ):
A_ : Dict = key_split[3]
A_ : Tuple = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
A_ : Optional[int] = val[
:dim, :
]
A_ : List[str] = val[
dim : dim * 2, :
]
A_ : Optional[int] = val[
-dim:, :
]
else:
A_ : Union[str, Any] = val[
:dim
]
A_ : List[str] = val[
dim : dim * 2
]
A_ : int = val[
-dim:
]
else:
if "weight" in key:
A_ : Union[str, Any] = val[
:dim, :
]
A_ : Optional[Any] = val[
dim : dim * 2, :
]
A_ : Union[str, Any] = val[
-dim:, :
]
else:
A_ : List[Any] = val[:dim]
A_ : Any = val[
dim : dim * 2
]
A_ : Optional[Any] = val[-dim:]
elif key.startswith("""mit""" ):
A_ : Any = key_split[2]
A_ : Optional[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
A_ : Optional[int] = val[:dim, :]
A_ : Dict = val[dim : dim * 2, :]
A_ : Tuple = val[-dim:, :]
else:
A_ : List[str] = val[:dim]
A_ : List[str] = val[dim : dim * 2]
A_ : List[Any] = val[-dim:]
else:
A_ : List[Any] = key_split[2]
A_ : str = config.text_config.hidden_size
if "weight" in key:
A_ : Optional[int] = val[:dim, :]
A_ : Optional[int] = val[
dim : dim * 2, :
]
A_ : Optional[Any] = val[-dim:, :]
else:
A_ : List[Any] = val[:dim]
A_ : Optional[int] = val[
dim : dim * 2
]
A_ : Any = val[-dim:]
else:
A_ : Dict = rename_key(lowerCamelCase__ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
A_ : str = val.T
A_ : str = val
return orig_state_dict
def a ( lowerCamelCase__ ):
'''simple docstring'''
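    # Load a fixed "eating spaghetti" test clip from the Hub, choosing the file that matches the requested frame count.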
if num_frames == 8:
A_ : Optional[int] = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
A_ : Dict = """eating_spaghetti.npy"""
elif num_frames == 32:
A_ : Union[str, Any] = """eating_spaghetti_32_frames.npy"""
A_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=lowerCamelCase__ , repo_type="""dataset""" , )
A_ : Optional[int] = np.load(lowerCamelCase__ )
return list(lowerCamelCase__ )
def a ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=False ):
'''simple docstring'''
A_ : Optional[int] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
A_ : Union[str, Any] = model_to_url[model_name]
A_ : List[str] = 8
if "16-frames" in model_name:
A_ : str = 16
elif "shot" in model_name:
A_ : List[str] = 32
A_ : Tuple = get_xclip_config(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = XCLIPModel(lowerCamelCase__ )
model.eval()
if "drive" in checkpoint_url:
A_ : Union[str, Any] = """pytorch_model.bin"""
gdown.cached_download(lowerCamelCase__ , lowerCamelCase__ , quiet=lowerCamelCase__ )
A_ : List[Any] = torch.load(lowerCamelCase__ , map_location="""cpu""" )["""model"""]
else:
A_ : Dict = torch.hub.load_state_dict_from_url(lowerCamelCase__ )["""model"""]
A_ : Optional[Any] = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[int] = XCLIPModel(lowerCamelCase__ )
A_, A_ : Union[str, Any] = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
A_ : Any = 3_36 if model_name == """xclip-large-patch14-16-frames""" else 2_24
A_ : Optional[int] = VideoMAEImageProcessor(size=lowerCamelCase__ )
A_ : Tuple = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
A_ : Dict = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
A_ : int = XCLIPProcessor(image_processor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
A_ : int = prepare_video(lowerCamelCase__ )
A_ : List[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=lowerCamelCase__ , return_tensors="""pt""" , padding=lowerCamelCase__ )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase__ )
# Verify outputs
A_ : str = outputs.logits_per_video
A_ : str = logits_per_video.softmax(dim=1 )
print("""Probs:""" , lowerCamelCase__ )
# kinetics-400
if model_name == "xclip-base-patch32":
A_ : int = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
A_ : Dict = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
A_ : Union[str, Any] = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
A_ : int = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
A_ : int = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
A_ : Dict = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
A_ : Tuple = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
A_ : Any = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
A_ : str = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
A_ : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
A_ : Union[str, Any] = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
A_ : int = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
A_ : str = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
A_ : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
A_ : Dict = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
A_ : int = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
A_ : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
A_ : Dict = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(f'Model name {model_name} not supported' )
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
processor.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
slow_tokenizer.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
lowerCamelCase :List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :Any = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 686 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = 'yolos'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
@property
    def inputs(self):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation(self):
return 1E-4
@property
    def default_onnx_opset(self):
        return 12
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree):
    '''simple docstring'''
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("""Not supported""")
    return shapes
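# For example (an illustration of the helper above): for
# {"a": torch.zeros(2, 3), "b": [torch.zeros(4)]}, _fetch_dims returns
# [torch.Size([2, 3]), torch.Size([4])].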
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx, dims):
    '''simple docstring'''
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
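# Worked example: with dims = (2, 3) (row-major), flat index 4 unflattens to (1, 1):
# 4 % 3 = 1 gives the last dimension, then 4 // 3 = 1 gives the first.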
@torch.jit.ignore
def _get_minimal_slice_set(start, end, dims, start_edges=None, end_edges=None, ):
'''simple docstring'''
    def reduce_edge_list(l) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]
    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []
# Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break
    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path)
    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
        middle_ground = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
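# Quick illustration of the one-dimensional base case above:
# _get_minimal_slice_set((0,), (2,), (5,)) returns [(slice(0, 3),)],
# i.e. a single inclusive slice covering indices 0..2.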
@torch.jit.ignore
def _chunk_slice(t, flat_start, flat_end, no_batch_dims):
    '''simple docstring'''
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims, )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(layer, inputs, chunk_size, no_batch_dims, low_mem=False, _out=None, _add_into_out=False, ):
    '''simple docstring'''
    if not (len(inputs) > 0):
        raise ValueError("""Must provide at least one input""")
    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])
    def _prep_inputs(t) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t
    prepped_inputs: Dict[str, Any] = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)
    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t
    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice, flat_start=i, flat_end=min(flat_batch_dim, i + chunk_size), no_batch_dims=len(orig_batch_dims), )
        chunks: Dict[str, Any] = tensor_tree_map(select_chunk, prepped_inputs)
        # Run the layer on the chunk
        output_chunk = layer(**chunks)
        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)
        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):
            def assign(d1, d2) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]
            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("""Not supported""")
        i += chunk_size
    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
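# Minimal usage sketch for chunk_layer above (the toy layer and shapes are
# made up for illustration):
#
#   layer = lambda x: {"out": x * 2}              # any callable taking keyword args
#   inputs = {"x": torch.randn(4, 8, 16)}         # batch dims (4, 8), feature dim 16
#   result = chunk_layer(layer, inputs, chunk_size=8, no_batch_dims=2)
#   # result["out"] has shape (4, 8, 16), computed 8 flattened batch rows at a time.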
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512, ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None
    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        logging.info("""Tuning chunk size...""")
        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size
        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4
        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False
        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2
        return candidates[min_viable_chunk_size_index]
    def _compare_arg_caches(self, ac1, ac2):
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent
    def tune_chunk_size(self, representative_fn, args, min_chunk_size, ):
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False
        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data
        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
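# Illustrative use of ChunkSizeTuner above (the probed callable is hypothetical):
#
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(representative_fn, (some_tensor,), min_chunk_size=1)
#
# tune_chunk_size binary-searches for the largest candidate chunk size that runs
# without a RuntimeError (e.g. CUDA OOM) and caches the result per input shape.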
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
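# Worked example of the WER formula in the description above: with reference
# "the cat sat" and prediction "the cat sat down", there are S=0 substitutions,
# D=0 deletions and I=1 insertion over N=3 reference words,
# so WER = (0 + 0 + 1) / 3 ≈ 0.33.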
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
'''simple docstring'''
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
lowerCamelCase :int = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
lowerCamelCase :Optional[int] = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
lowerCamelCase :int = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
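# Note on "rougeLsum" from the description above: it splits on newlines, so
# multi-sentence inputs should be passed as e.g. "first sentence.\nsecond sentence."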
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'negative_prompt',
        'height',
        'width',
        'negative_prompt_embeds',
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        components = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_cycle(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, """half"""):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16])
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
    def test_inference_batch_single_identical(self):
return super().test_inference_batch_single_identical()
@skip_mps
    def test_dict_tuple_outputs_equivalent(self):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
    def test_save_load_optional_components(self):
return super().test_save_load_optional_components()
@skip_mps
    def test_attention_slicing_forward_pass(self):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""")
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""")
        init_image = init_image.resize((512, 512))
        model_id = """CompVis/stable-diffusion-v1-4"""
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="""scheduler""")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="""fp16""")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = """A black colored car"""
        source_prompt = """A blue colored car"""
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5E-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/cycle-diffusion/black_colored_car.png""")
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""")
        init_image = init_image.resize((512, 512))
        model_id = """CompVis/stable-diffusion-v1-4"""
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="""scheduler""")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = """A black colored car"""
        source_prompt = """A blue colored car"""
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt , source_prompt=source_prompt , image=init_image , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=generator , output_type="""np""" , )
        image = output.images
        assert np.abs(image - expected_image).max() < 2E-2
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :str = logging.get_logger(__name__)
lowerCamelCase :str = {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/config.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/config.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/config.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/config.json''',
'''bert-base-multilingual-uncased''': '''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json''',
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/config.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/config.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'''
),
'''bert-base-cased-finetuned-mrpc''': '''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json''',
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json''',
'''bert-base-german-dbmdz-uncased''': '''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese''': '''https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json''',
'''cl-tohoku/bert-base-japanese-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'''
),
'''cl-tohoku/bert-base-japanese-char-whole-word-masking''': (
'''https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'''
),
'''wietsedv/bert-base-dutch-cased''': '''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json''',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = 'bert'
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
if self.task == "multiple-choice":
A_ : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Optional[Any] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
            ] )
'''simple docstring'''
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = 'down'
    def test_output(self):
        expected_slice = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
        super().test_output(expected_slice)
class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = 'down'
    def test_output(self):
        expected_slice = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
        super().test_output(expected_slice)
class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = 'down'
    def test_output(self):
        expected_slice = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
        super().test_output(expected_slice)
class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = 'down'
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
        super().test_output(expected_slice)
class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
    def test_output(self):
        expected_slice = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
        super().test_output(expected_slice)
class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)
    def test_output(self):
        expected_slice = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
        super().test_output(expected_slice)
class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)
    def test_output(self):
        expected_slice = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
        super().test_output(expected_slice)
class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
        super().test_output(expected_slice)
class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = 'down'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
        super().test_output(expected_slice)
class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = 'mid'
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            """in_channels""": 32,
            """temb_channels""": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
        super().test_output(expected_slice)
class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = 'mid'
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
        super().test_output(expected_slice)
class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = 'mid'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
        super().test_output(expected_slice)
class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def test_output(self):
        expected_slice = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
        super().test_output(expected_slice)
class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def test_output(self):
        expected_slice = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
        super().test_output(expected_slice)
class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
        super().test_output(expected_slice)
class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
        super().test_output(expected_slice)
class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    @unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
    def test_output(self):
        expected_slice = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
        super().test_output(expected_slice)
class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def test_output(self):
        expected_slice = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
        super().test_output(expected_slice)
class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)
    def test_output(self):
        expected_slice = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
        super().test_output(expected_slice)
class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"""in_channels""": 32, """out_channels""": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
        super().test_output(expected_slice)
class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = 'up'
    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"""in_channels""": 32, """out_channels""": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output(self):
        expected_slice = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(expected_slice)
'''simple docstring'''
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1E-12, max_iterations=100, ):
    '''simple docstring'''
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1E12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector
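# Worked example: for the symmetric matrix [[2, 1], [1, 2]] the eigenvalues are
# 3 and 1, so power iteration converges to the dominant eigenpair, approximately
# (3.0, [0.7071, 0.7071]):
#
#   value, vec = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))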
def test_power_iteration():
    '''simple docstring'''
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1E-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
    test_power_iteration()
'''simple docstring'''
from __future__ import annotations
def all_construct(target, word_bank=None):
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
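# Small worked example of the function above: all_construct("abc", ["a", "b", "c", "ab"])
# yields the two decompositions [["ab", "c"], ["a", "b", "c"]] (order may vary).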
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
    )
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3], ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
        result = model(pixel_values , labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
    def test_inputs_embeds(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def test_multi_gpu_data_parallel_forward(self):
pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="""pt""").pixel_values.to(torch_device)
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196) , dtype=torch.bool).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]]).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2))
@slow
def _a (self ):
A_ : Dict = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(lowercase )
A_ : str = self.default_image_processor
A_ : Union[str, Any] = prepare_img()
A_ : List[Any] = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : Union[str, Any] = model(**lowercase )
A_ : str = outputs.logits
# verify the logits
A_ : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(logits.shape , lowercase )
A_ : str = torch.tensor([-1.23_85, -1.09_87, -1.01_08] ).to(lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase , atol=1E-4 ) )
A_ : List[Any] = 281
self.assertEqual(logits.argmax(-1 ).item() , lowercase )
@slow
def _a (self ):
A_ : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
lowercase )
A_ : Tuple = self.default_image_processor
A_ : List[Any] = prepare_img()
A_ : List[str] = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : List[Any] = model(**lowercase )
A_ : List[str] = outputs.logits
# verify the logits
A_ : List[Any] = torch.Size((1, 21841) )
self.assertEqual(logits.shape , lowercase )
A_ : Optional[Any] = torch.tensor([1.68_81, -0.27_87, 0.59_01] ).to(lowercase )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase , atol=1E-4 ) )
A_ : Any = 2396
self.assertEqual(logits.argmax(-1 ).item() , lowercase )
@slow
def _a (self ):
A_ : Tuple = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
A_ : Any = model.to(lowercase )
A_ : int = BeitImageProcessor(do_resize=lowercase , size=640 , do_center_crop=lowercase )
A_ : Tuple = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : int = Image.open(ds[0]["""file"""] )
A_ : Dict = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : str = model(**lowercase )
A_ : List[Any] = outputs.logits
# verify the logits
A_ : Union[str, Any] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , lowercase )
        is_pillow_less_than_a = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
        if is_pillow_less_than_a:
A_ : Optional[int] = torch.tensor(
[
[[-4.92_25, -2.39_54, -3.05_22], [-2.88_22, -1.00_46, -1.75_61], [-2.95_49, -1.32_28, -2.13_47]],
[[-5.81_68, -3.41_29, -4.07_78], [-3.86_51, -2.22_14, -3.02_77], [-3.83_56, -2.46_43, -3.35_35]],
[[-0.00_78, 3.99_52, 4.07_54], [2.98_56, 4.69_44, 5.00_35], [3.24_13, 4.78_13, 4.99_69]],
] , device=lowercase , )
else:
A_ : int = torch.tensor(
[
[[-4.89_60, -2.36_88, -3.03_55], [-2.84_78, -0.98_36, -1.74_18], [-2.94_49, -1.33_32, -2.14_56]],
[[-5.80_81, -3.41_24, -4.10_06], [-3.85_61, -2.20_81, -3.03_23], [-3.83_65, -2.46_01, -3.36_69]],
[[-0.03_09, 3.98_68, 4.05_40], [2.96_40, 4.68_77, 4.99_76], [3.20_81, 4.76_90, 4.99_42]],
] , device=lowercase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowercase , atol=1E-4 ) )
@slow
def _a (self ):
A_ : Optional[int] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
A_ : Tuple = model.to(lowercase )
A_ : Tuple = BeitImageProcessor(do_resize=lowercase , size=640 , do_center_crop=lowercase )
A_ : Optional[Any] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
A_ : Union[str, Any] = Image.open(ds[0]["""file"""] )
A_ : Tuple = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : int = model(**lowercase )
A_ : Optional[int] = outputs.logits.detach().cpu()
A_ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=lowercase , target_sizes=[(500, 300)] )
A_ : List[str] = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , lowercase )
A_ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=lowercase )
A_ : int = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , lowercase ) | 686 |
'''simple docstring'''
def is_balanced( lowerCamelCase__ ):
    '''simple docstring'''
    stack: list[str] = []
    open_brackets = set({"""(""", """[""", """{"""} )
    closed_brackets = set({""")""", """]""", """}"""} )
    open_to_closed = {"""{""": """}""", """[""": """]""", """(""": """)"""}
    for i in range(len(lowerCamelCase__ ) ):
        if lowerCamelCase__[i] in open_brackets:
            stack.append(lowerCamelCase__[i] )
        elif lowerCamelCase__[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != lowerCamelCase__[i])
        ):
            return False
    return len(stack ) == 0
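# A few hand-checked behaviors of is_balanced (illustrative examples, not part
# of the original module):
#     is_balanced("([]{})")  # True  - every opener is closed in matching order
#     is_balanced("([)]")    # False - ")" pops "[", whose closer is "]"
#     is_balanced(")")       # False - closing bracket while the stack is empty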
def main( ):
'''simple docstring'''
A_ : int = input("""Enter sequence of brackets: """ )
    if is_balanced(A_ ):
        print(A_ , """is balanced""" )
    else:
        print(A_ , """is not balanced""" )
if __name__ == "__main__":
main() | 686 | 1 |
'''simple docstring'''
from __future__ import annotations
def all_construct( lowerCamelCase__ , word_bank = None ):
    '''simple docstring'''
    word_bank = word_bank or []
    # create a table
    table_size: int = len(lowerCamelCase__ ) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]] # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if lowerCamelCase__[i : i + len(word )] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(lowerCamelCase__ )]:
        combination.reverse()
    return table[len(lowerCamelCase__ )]
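# Rough trace (hand-checked) for all_construct("ab", ["a", "b", "ab"]):
#     table[0] = [[]]                  # the empty prefix is always constructible
#     table[1] = [["a"]]               # "a" matches at index 0
#     table[2] = [["ab"], ["b", "a"]]  # "ab" at 0, or "b" appended after ["a"]
# after the final reversal the function returns [["ab"], ["a", "b"]].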
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 686 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
    def __init__(self , path_or_paths , split = None , features = None , cache_dir = None , keep_in_memory = False , streaming = False , field = None , num_proc = None , **kwargs ):
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def _a (self ):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class _lowerCAmelCase :
    def __init__(self , dataset , path_or_buf , batch_size = None , num_proc = None , **to_json_kwargs ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = """utf-8"""
        self.to_json_kwargs = to_json_kwargs
    def _a (self ):
        _ = self.to_json_kwargs.pop("""path_or_buf""" , None )
        orient = self.to_json_kwargs.pop("""orient""" , """records""" )
        lines = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
        index = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
        compression = self.to_json_kwargs.pop("""compression""" , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , """wb""" , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    """ was passed. Please provide a local path instead.""" )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
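    # Hedged usage sketch for the writer class above (named JsonDatasetWriter in
    # upstream `datasets`; the dataset and file name here are assumptions):
    #     JsonDatasetWriter(my_dataset, "out.jsonl", num_proc=2).write()
    # With num_proc > 1, _write below serializes batches in parallel via _batch_json.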
    def _batch_json(self , args ):
        offset, orient, lines, index, to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith("""\n""" ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write(self , file_obj , orient , lines , index , **to_json_kwargs ):
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_str )
        else:
            num_rows, batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
                    written += file_obj.write(json_str )
        return written | 686 | 1 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def a ( files , tmp_path_factory ):
    '''simple docstring'''
    dataset_infos_dir = tmp_path_factory.mktemp("""dset_infos_dir""" )
if "full:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""---\ndataset_info:\n dataset_size: 42\n---""" )
if "empty:README.md" in files:
with open(dataset_infos_dir / """README.md""" , """w""" ) as f:
f.write("""""" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f:
f.write("""{\"default\": {\"dataset_size\": 42}}""" )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ),
] , )
def a ( dataset_info , tmp_path ):
    '''simple docstring'''
    tmp_path = str(tmp_path )
    dataset_info.write_to_directory(tmp_path )
    reloaded = DatasetInfo.from_directory(tmp_path )
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path , """dataset_info.json""" ) )
def a ( ):
'''simple docstring'''
    dataset_info = DatasetInfo(
description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=13_37 , post_processing_size=4_42 , dataset_size=12_34 , size_in_bytes=13_37 + 4_42 + 12_34 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
assert dataset_info_yaml_dict == reloaded
def a ( ):
'''simple docstring'''
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=42 ),
"""v2""": DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def a ( dataset_infos_dict , lowerCamelCase__ ):
    '''simple docstring'''
    tmp_path = str(lowerCamelCase__ )
    dataset_infos_dict.write_to_directory(tmp_path )
    reloaded = DatasetInfosDict.from_directory(tmp_path )
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
assert os.path.exists(os.path.join(lowerCamelCase__ , """README.md""" ) ) | 686 |
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
blip_test = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
        bert_test_tester_mapping = get_test_to_tester_mapping(bert_test )
        blip_test_tester_mapping = get_test_to_tester_mapping(blip_test )
        bert_expected = {"""BertModelTest""": """BertModelTester"""}
        blip_expected = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping ) , bert_expected )
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping ) , blip_expected )
def _a (self ):
        bert_model_test_mapping = get_model_to_test_mapping(bert_test )
        blip_model_test_mapping = get_model_to_test_mapping(blip_test )
        bert_expected = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
        blip_expected = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping ) , bert_expected )
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping ) , blip_expected )
def _a (self ):
        bert_model_tester_mapping = get_model_to_tester_mapping(bert_test )
        blip_model_tester_mapping = get_model_to_tester_mapping(blip_test )
        bert_expected = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
        blip_expected = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping ) , bert_expected )
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping ) , blip_expected ) | 686 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : List[str] = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : List[str] = 'BlipImageProcessor'
__SCREAMING_SNAKE_CASE : Any = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self , lowercase , lowercase ):
A_ : Any = False
super().__init__(lowercase , lowercase )
A_ : Optional[Any] = self.image_processor
def __call__(self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
            A_ : Optional[int] = self.tokenizer
            text_encoding = self.tokenizer(
                text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
            return text_encoding
# add pixel_values
        encoding_image_processor = self.image_processor(lowercase , return_tensors=lowercase )
        if text is not None:
            text_encoding = self.tokenizer(
                text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
return encoding_image_processor
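    # Hedged usage sketch (upstream this class is BlipProcessor; the checkpoint
    # name is an assumption):
    #     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    #     inputs = processor(images=image, text="a photo of", return_tensors="pt")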
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
A_ : Any = self.tokenizer.model_input_names
A_ : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 686 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 686 | 1 |
'''simple docstring'''
import warnings
warnings.warn(
    '''memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
) | 686 |
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present( lowerCamelCase__ ):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (lowerCamelCase__ / file).exists():
            return False
    return True
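# For example (the path is illustrative), once the package is built:
#     test_custom_files_are_present(Path("build/lib/transformers"))
# returns False as soon as any entry of FILES_TO_FIND is missing from that tree.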
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
lowerCamelCase :List[Any] = parser.parse_args()
if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''') | 686 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Tuple = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''alibaba-damo/mgp-str-base''': '''https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json''',
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = 'mgp-str'
def __init__(self , lowercase=[32, 128] , lowercase=4 , lowercase=3 , lowercase=27 , lowercase=38 , lowercase=50257 , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=4.0 , lowercase=True , lowercase=False , lowercase=1E-5 , lowercase=0.0 , lowercase=0.0 , lowercase=0.0 , lowercase=False , lowercase=0.02 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[str] = image_size
A_ : Dict = patch_size
A_ : Optional[int] = num_channels
A_ : Union[str, Any] = max_token_length
A_ : str = num_character_labels
A_ : Tuple = num_bpe_labels
A_ : Optional[Any] = num_wordpiece_labels
A_ : Any = hidden_size
A_ : Any = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = mlp_ratio
A_ : List[Any] = distilled
A_ : List[Any] = layer_norm_eps
A_ : int = drop_rate
A_ : Optional[int] = qkv_bias
A_ : Union[str, Any] = attn_drop_rate
A_ : Dict = drop_path_rate
A_ : Any = output_aa_attentions
A_ : Dict = initializer_range | 686 |
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}
def _calculate( days , absent , late ):
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution( days = 30 ):
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
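# Hand-checkable values (the 4-day count matches the Project Euler 191 statement):
#     solution(1) == 3   # "O", "L" or "A"
#     solution(2) == 8   # all 9 two-day strings except "AA"
#     solution(4) == 43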
if __name__ == "__main__":
print(solution()) | 686 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
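    # Note: the tiny 32x32 image and 2 inference steps keep this smoke test
    # fast; strength=0.8 means most of the noising schedule is applied to the
    # init image before it is denoised back.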
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
assert np.abs(image - expected_image ).max() < 2E-2 | 686 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Tuple = 'linear'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'cosine_with_restarts'
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'polynomial'
__SCREAMING_SNAKE_CASE : Optional[int] = 'constant'
__SCREAMING_SNAKE_CASE : str = 'constant_with_warmup'
__SCREAMING_SNAKE_CASE : Dict = 'piecewise_constant'
def get_constant_schedule( optimizer , last_epoch = -1 ):
    '''simple docstring'''
    return LambdaLR(optimizer , lambda _ : 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup( optimizer , num_warmup_steps , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule( optimizer , step_rules , last_epoch = -1 ):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(""":""" )
        steps = int(steps_str )
        value = float(value_str )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
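# Illustrative step_rules string for the parser above (format: "multiple:step"
# pairs followed by a final default multiple):
#     "1:10,0.1:20,0.01"
# keeps the lr multiple at 1.0 for steps below 10, 0.1 below 20, then 0.01.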
def get_linear_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup( optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    '''simple docstring'''
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than initial lr ({lr_init})' )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
lowerCamelCase :List[Any] = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
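# Hedged usage sketch for get_scheduler below (the optimizer and step counts
# are assumed placeholders):
#     scheduler = get_scheduler(
#         "cosine" , optimizer , num_warmup_steps=500 , num_training_steps=10000 )
#     for _ in range(10000):
#         optimizer.step()
#         scheduler.step()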
def get_scheduler( name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    '''simple docstring'''
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch ) | 686 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters( with_config=True ):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
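# With with_config=True the helper above yields one named test case per
# (dataset, config) pair, e.g.
#     {"testcase_name": "wikipedia/20220301.de", "dataset": "wikipedia", "config_name": "20220301.de"}
# and with with_config=False just the unique dataset names.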
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = None
__SCREAMING_SNAKE_CASE : Dict = None
def _a (self , lowercase , lowercase ):
with TemporaryDirectory() as tmp_dir:
A_ : Optional[Any] = dataset_module_factory(lowercase , cache_dir=lowercase )
A_ : Optional[Any] = import_main_class(dataset_module.module_path , dataset=lowercase )
A_ : DatasetBuilder = builder_cls(
cache_dir=lowercase , config_name=lowercase , hash=dataset_module.hash , )
A_ : int = """/""".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=lowercase ).replace(os.sep , """/""" ),
config.DATASET_INFO_FILENAME,
] )
A_ : Optional[int] = cached_path(lowercase , cache_dir=lowercase )
self.assertTrue(os.path.exists(lowercase ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs( tmp_path_factory ):
    '''simple docstring'''
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset( lowerCamelCase__ ):
    '''simple docstring'''
    dataset_module = dataset_module_factory("""wikipedia""" , cache_dir=lowerCamelCase__ )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=lowerCamelCase__ , config_name="""20220301.frr""" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["""train"""] , IterableDataset )
assert next(iter(ds["""train"""] ) ) | 686 |
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
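# Illustrative call (the key and tensor are assumptions): set_recursively(model,
# "encoder.layers.0.conv", tensor, name, "weight") walks the dotted path with
# getattr and, after the shape check, assigns tensor into `.weight.data`.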
def should_ignore( name , ignore_keys ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix, suffix = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
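# e.g. should_ignore("encoder.model.3.conv.bias", ["encoder.model.3.*"]) is True:
# a trailing ".*" turns the rule into a prefix match on "encoder.model.3.".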
def recursively_load_weights( orig_dict , hf_model , model_name ):
    '''simple docstring'''
    unused_weights = []
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}' )
    for name, value in orig_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'{name} was ignored' )
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(""".*.""" )
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("""embed""" ) and name.endswith("""embed_avg""" ):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key )[0].split(""".""" )[-2]
                    mapped_key = mapped_key.replace("""*""" , layer_index )
                if "weight_g" in name:
                    weight_type = """weight_g"""
                elif "weight_v" in name:
                    weight_type = """weight_v"""
                elif "weight_ih_l0" in name:
                    weight_type = """weight_ih_l0"""
                elif "weight_hh_l0" in name:
                    weight_type = """weight_hh_l0"""
                elif "bias_ih_l0" in name:
                    weight_type = """bias_ih_l0"""
                elif "bias_hh_l0" in name:
                    weight_type = """bias_hh_l0"""
                elif "weight_ih_l1" in name:
                    weight_type = """weight_ih_l1"""
                elif "weight_hh_l1" in name:
                    weight_type = """weight_hh_l1"""
                elif "bias_ih_l1" in name:
                    weight_type = """bias_ih_l1"""
                elif "bias_hh_l1" in name:
                    weight_type = """bias_hh_l1"""
                elif "bias" in name:
                    weight_type = """bias"""
                elif "weight" in name:
                    weight_type = """weight"""
                elif "running_mean" in name:
                    weight_type = """running_mean"""
                elif "running_var" in name:
                    weight_type = """running_var"""
                elif "num_batches_tracked" in name:
                    weight_type = """num_batches_tracked"""
                else:
                    weight_type = None
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , ):
'''simple docstring'''
if config_path is not None:
A_ : Any = EncodecConfig.from_pretrained(lowerCamelCase__ )
else:
A_ : Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
A_ : Dict = [8, 5, 4, 4]
A_ : Optional[Any] = [2.2]
A_ : Tuple = 64
A_ : Tuple = 3_20_00
A_ : List[Any] = 20_48
A_ : Optional[Any] = False
A_ : str = False
A_ : Optional[int] = False
elif model_name == "encodec_48khz":
A_ : Dict = [8, 5, 4, 2]
A_ : Tuple = [3.0, 6.0, 12.0, 24.0]
A_ : List[Any] = 4_80_00
A_ : Dict = 2
A_ : Dict = False
A_ : Dict = """time_group_norm"""
A_ : Optional[Any] = True
A_ : str = 1.0
A_ : Any = 0.01
else:
raise ValueError(f'Unknown model name: {model_name}' )
A_ : Dict = EncodecModel(lowerCamelCase__ )
A_ : Any = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(lowerCamelCase__ )
A_ : int = torch.load(lowerCamelCase__ )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
A_ : Tuple = original_checkpoint["""best_state"""]
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if repo_id:
print("""Pushing to the hub...""" )
feature_extractor.push_to_hub(lowerCamelCase__ )
model.push_to_hub(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCamelCase :Dict = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
) | 686 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def a ( lowerCamelCase__ ):
'''simple docstring'''
return np.maximum(0 , lowerCamelCase__ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5] | 686 |
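# A hedged companion sketch (not part of the original file): the matching
# subgradient used in backprop, with the common convention relu'(0) = 0.
def relu_derivative(vector: np.ndarray) -> np.ndarray:
    # 1.0 where the input was strictly positive, 0.0 elsewhere.
    return (np.asarray(vector) > 0).astype(float)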
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Any = logging.get_logger(__name__)
lowerCamelCase :Any = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = 'beit'
def __init__(self , lowercase=8192 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ):
super().__init__(**lowercase )
A_ : Union[str, Any] = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : str = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = initializer_range
A_ : str = layer_norm_eps
A_ : Any = image_size
A_ : int = patch_size
A_ : List[str] = num_channels
A_ : Any = use_mask_token
A_ : Dict = use_absolute_position_embeddings
A_ : List[Any] = use_relative_position_bias
A_ : Tuple = use_shared_relative_position_bias
A_ : Optional[int] = layer_scale_init_value
A_ : Tuple = drop_path_rate
A_ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
A_ : Tuple = out_indices
A_ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
A_ : Optional[int] = use_auxiliary_head
A_ : Union[str, Any] = auxiliary_loss_weight
A_ : Tuple = auxiliary_channels
A_ : List[Any] = auxiliary_num_convs
A_ : Dict = auxiliary_concat_input
A_ : Optional[Any] = semantic_loss_ignore_index
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4 | 686 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCamelCase :str = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , *lowercase , **lowercase ):
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , lowercase , )
super().__init__(*lowercase , **lowercase ) | 686 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCamelCase :Optional[int] = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return torch.atana(lowerCamelCase__ , lowerCamelCase__ ) / math.pi * 2
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = torch.sin(t * math.pi / 2 ) ** 2
A_ : List[Any] = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(lowerCamelCase__ , lowerCamelCase__ )
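# Sketch of the math above (the "crash" schedule from v-diffusion): for t in
# [0, 1] the snippet computes
#     sigma = sin(t * pi / 2) ** 2,    alpha = sqrt(1 - sigma ** 2),
# then maps the pair back to a timestep, presumably via atan2 as in the
# original v-diffusion utilities:
#     t' = atan2(sigma, alpha) * 2 / pi,
# which keeps t' in [0, 1], with t' = 0 fully clean and t' = 1 fully noised.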
class _lowerCAmelCase ( __UpperCAmelCase ):
pass
class _lowerCAmelCase ( nn.Module ):
def __init__(self , lowercase ):
super().__init__()
A_ : int = DiffusionAttnUnetaD(lowercase , n_attn_layers=4 )
A_ : str = deepcopy(self.diffusion )
A_ : Optional[int] = torch.quasirandom.SobolEngine(1 , scramble=lowercase )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = MODELS_MAP[model_name]["""url"""]
os.system(f'wget {url} ./' )
return f'./{model_name}.ckpt'
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCamelCase :str = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCamelCase :str = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCamelCase :int = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCamelCase :List[Any] = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCamelCase :Optional[Any] = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if name.startswith("""skip""" ):
return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
# name has to be of format main.{digit}
if not name.startswith("""main.""" ):
raise ValueError(f'ResConvBlock error with {name}' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
if name.startswith(lowerCamelCase__ ) and not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return name.replace(lowerCamelCase__ , lowerCamelCase__ )
elif name.startswith(lowerCamelCase__ ):
return [name.replace(lowerCamelCase__ , lowerCamelCase__ ) for v in value]
raise ValueError(f'Attn error with {name}' )
def a ( lowerCamelCase__ , lowerCamelCase__=13 ):
'''simple docstring'''
A_ : Union[str, Any] = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
A_ : Dict = 0
if string.startswith("""net.3.""" ):
depth += 1
A_ : int = string[6:]
elif string.startswith("""net.""" ):
A_ : Tuple = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
A_ : Dict = string[7:]
if string.startswith("""main.""" ):
A_ : Union[str, Any] = string[5:]
# mid block
if string[:2].isdigit():
A_ : Optional[Any] = string[:2]
A_ : Optional[Any] = string[2:]
else:
A_ : List[Any] = string[0]
A_ : Dict = string[1:]
if depth == max_depth:
A_ : Optional[int] = MID_NUM_TO_LAYER[layer_num]
A_ : Optional[Any] = """mid_block"""
elif depth > 0 and int(lowerCamelCase__ ) < 7:
A_ : Any = DOWN_NUM_TO_LAYER[layer_num]
A_ : Union[str, Any] = f'down_blocks.{depth}'
elif depth > 0 and int(lowerCamelCase__ ) > 7:
A_ : List[str] = UP_NUM_TO_LAYER[layer_num]
A_ : List[str] = f'up_blocks.{max_depth - depth - 1}'
elif depth == 0:
A_ : str = DEPTH_0_TO_LAYER[layer_num]
A_ : Dict = f'up_blocks.{max_depth - 1}' if int(lowerCamelCase__ ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
A_ : Optional[int] = string_left[1:]
if "resnets" in new_layer:
A_ : Tuple = convert_resconv_naming(lowerCamelCase__ )
elif "attentions" in new_layer:
A_ : Optional[int] = convert_attn_naming(lowerCamelCase__ )
A_ : Dict = new_string_left
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Union[str, Any] = prefix + """.""" + new_layer + """.""" + string_left
else:
A_ : Optional[int] = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
            # up- and downsample layers don't have trainable weights
continue
A_ : List[Any] = rename(lowerCamelCase__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
A_ : Tuple = transform_conv_attns(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
else:
A_ : int = v
return new_state_dict
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if len(lowerCamelCase__ ) == 1:
if len(v.shape ) == 3:
# weight
A_ : Optional[Any] = v[:, :, 0]
else:
# bias
A_ : Union[str, Any] = v
else:
# qkv matrices
A_ : Optional[int] = v.shape[0]
A_ : str = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
A_ : int = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
A_ : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
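# Shape illustration for the qkv split above: a fused Conv1d weight of shape
# (3 * C, C_in, 1) is sliced into three (C, C_in) linear weights -- one each
# for query, key and value -- dropping the trailing kernel dimension of size
# 1; fused biases of shape (3 * C,) are sliced the same way, unreshaped.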
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
A_ : Dict = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
A_ : int = download(lowerCamelCase__ )
A_ : Any = MODELS_MAP[model_name]["""sample_rate"""]
A_ : List[Any] = MODELS_MAP[model_name]["""sample_size"""]
A_ : Tuple = Object()
A_ : Union[str, Any] = sample_size
A_ : Tuple = sample_rate
A_ : int = 0
A_ : List[Any] = UNetaDModel(sample_size=lowerCamelCase__ , sample_rate=lowerCamelCase__ )
A_ : Optional[Any] = diffusers_model.state_dict()
A_ : Dict = DiffusionUncond(lowerCamelCase__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=lowerCamelCase__ )["""state_dict"""] )
A_ : Any = orig_model.diffusion_ema.eval()
A_ : Any = orig_model.state_dict()
A_ : List[str] = rename_orig_weights(lowerCamelCase__ )
A_ : Any = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
A_ : Optional[int] = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(lowerCamelCase__ ) == 0, f'Problem with {renamed_minus_diffusers}'
assert all(k.endswith("""kernel""" ) for k in list(lowerCamelCase__ ) ), f'Problem with {diffusers_minus_renamed}'
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
if key == "time_proj.weight":
A_ : str = value.squeeze()
A_ : Union[str, Any] = value
diffusers_model.load_state_dict(lowerCamelCase__ )
A_ : Optional[Any] = 1_00
A_ : Union[str, Any] = 33
A_ : Any = IPNDMScheduler(num_train_timesteps=lowerCamelCase__ )
A_ : List[str] = torch.manual_seed(lowerCamelCase__ )
A_ : Any = torch.randn([1, 2, config.sample_size] , generator=lowerCamelCase__ ).to(lowerCamelCase__ )
A_ : str = torch.linspace(1 , 0 , steps + 1 , device=lowerCamelCase__ )[:-1]
A_ : List[Any] = get_crash_schedule(lowerCamelCase__ )
A_ : str = DanceDiffusionPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
A_ : str = torch.manual_seed(33 )
A_ : int = pipe(num_inference_steps=lowerCamelCase__ , generator=lowerCamelCase__ ).audios
A_ : Optional[int] = sampling.iplms_sample(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , {} )
A_ : str = generated.clamp(-1 , 1 )
A_ : List[Any] = (generated - audio).abs().sum()
A_ : int = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , lowerCamelCase__ )
print("""Diff max""" , lowerCamelCase__ )
assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase :List[str] = parser.parse_args()
main(args) | 686 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Optional[int] = tempfile.mkdtemp()
# fmt: off
A_ : Union[str, Any] = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A_ : int = dict(zip(lowercase , range(len(lowercase ) ) ) )
A_ : List[str] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A_ : str = {"""unk_token""": """<unk>"""}
A_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase ) )
A_ : Union[str, Any] = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"""image_std""": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A_ : List[str] = os.path.join(self.tmpdirname , lowercase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowercase , lowercase )
def _a (self , **lowercase ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **lowercase )
def _a (self , **lowercase ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **lowercase )
def _a (self , **lowercase ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **lowercase )
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : int = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_rust_tokenizer()
A_ : Tuple = self.get_image_processor()
A_ : List[Any] = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_slow.save_pretrained(self.tmpdirname )
A_ : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase )
A_ : Union[str, Any] = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
processor_fast.save_pretrained(self.tmpdirname )
A_ : int = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowercase )
self.assertIsInstance(processor_fast.tokenizer , lowercase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowercase )
self.assertIsInstance(processor_fast.image_processor , lowercase )
def _a (self ):
A_ : str = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : List[Any] = self.get_image_processor(do_normalize=lowercase )
A_ : Any = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
def _a (self ):
A_ : List[str] = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Optional[Any] = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : str = self.prepare_image_inputs()
A_ : Optional[int] = image_processor(lowercase , return_tensors="""np""" )
A_ : int = processor(images=lowercase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : List[str] = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Tuple = """lower newer"""
A_ : Optional[Any] = processor(text=lowercase , return_tensors="""np""" )
A_ : List[Any] = tokenizer(lowercase , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : List[Any] = self.get_tokenizer()
A_ : List[Any] = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Dict = processor(text=lowercase , images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : str = """google/owlvit-base-patch32"""
A_ : Union[str, Any] = OwlViTProcessor.from_pretrained(lowercase )
A_ : str = ["""cat""", """nasa badge"""]
A_ : Tuple = processor(text=lowercase )
A_ : Tuple = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Optional[Any] = """google/owlvit-base-patch32"""
A_ : Optional[Any] = OwlViTProcessor.from_pretrained(lowercase )
A_ : Optional[Any] = [["""cat""", """nasa badge"""], ["""person"""]]
A_ : List[str] = processor(text=lowercase )
A_ : Union[str, Any] = 16
A_ : Dict = len(lowercase )
A_ : Optional[Any] = max([len(lowercase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : int = """google/owlvit-base-patch32"""
A_ : List[Any] = OwlViTProcessor.from_pretrained(lowercase )
A_ : List[str] = ["""cat""", """nasa badge"""]
A_ : Optional[int] = processor(text=lowercase )
A_ : str = 16
A_ : Dict = inputs["""input_ids"""]
A_ : Any = [
[49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _a (self ):
A_ : Union[str, Any] = self.get_image_processor()
A_ : Any = self.get_tokenizer()
A_ : Tuple = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Optional[int] = self.prepare_image_inputs()
A_ : int = processor(images=lowercase , query_images=lowercase )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Any = OwlViTProcessor(tokenizer=lowercase , image_processor=lowercase )
A_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : str = processor.batch_decode(lowercase )
A_ : Optional[int] = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase ) | 686 |
'''simple docstring'''
from math import factorial
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
    if not isinstance(lowerCamelCase__ , int ) or not isinstance(lowerCamelCase__ , int ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if successes > trials:
        raise ValueError("""successes must be less than or equal to trials""" )
if not 0 < prob < 1:
raise ValueError("""prob has to be in range of 1 - 0""" )
A_ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
A_ : Union[str, Any] = float(factorial(lowerCamelCase__ ) )
coefficient /= factorial(lowerCamelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 1 |
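# A minimal follow-up sketch (not in the original module): the PMF above
# composes into a cumulative probability P(X <= k). `max_successes` is a name
# chosen here for illustration, and the sketch assumes the PMF is exposed as
# `binomial_distribution`, matching the demo call above.
def cumulative_binomial_distribution(max_successes: int, trials: int, prob: float) -> float:
    # Sum the point probabilities for 0, 1, ..., max_successes successes.
    return sum(
        binomial_distribution(successes, trials, prob)
        for successes in range(max_successes + 1)
    )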
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
lowerCamelCase :str = datasets.logging.get_logger(__name__)
lowerCamelCase :List[str] = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
lowerCamelCase :List[Any] = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year\'s competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
lowerCamelCase :Optional[int] = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`gpus` (int): Number of GPUs to use; defaults to 1 when CUDA is available, otherwise 0.
`progress_bar` (bool): Whether to show a progress bar while scoring.
`model`: COMET model to be used, selected via the metric\'s config name. Defaults to `wmt20-comet-da`.
Returns:
`mean_score`: Corpus-level score, the mean of the segment-level scores.
`scores`: List of segment-level scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
def _a (self , lowercase ):
if self.config_name == "default":
A_ : Optional[int] = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) )
else:
A_ : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def _a (self , lowercase , lowercase , lowercase , lowercase=None , lowercase=False ):
if gpus is None:
A_ : Any = 1 if torch.cuda.is_available() else 0
A_ : List[str] = {"""src""": sources, """mt""": predictions, """ref""": references}
A_ : List[Any] = [dict(zip(lowercase , lowercase ) ) for t in zip(*data.values() )]
        scores, mean_score = self.scorer.predict(lowercase , gpus=lowercase , progress_bar=lowercase )
return {"mean_score": mean_score, "scores": scores} | 686 |
'''simple docstring'''
import re
def a ( lowerCamelCase__ ):
'''simple docstring'''
if len(re.findall("""[ATCG]""" , lowerCamelCase__ ) ) != len(lowerCamelCase__ ):
raise ValueError("""Invalid Strand""" )
    return lowerCamelCase__.translate(lowerCamelCase__.maketrans("""ATCG""" , """TAGC""" ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
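# A small follow-up sketch (illustrative, not in the original module): the same
# str.translate trick yields a reverse complement by reversing the translated
# strand, reading it 3' -> 5'.
def reverse_complement(dna: str) -> str:
    return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )[::-1]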
'''simple docstring'''
def a ( lowerCamelCase__ = 50 ):
'''simple docstring'''
A_ : Any = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"{solution() = }") | 686 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(lowerCamelCase__ ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(lowerCamelCase__ ):
http_head("""https://huggingface.co""" ) | 686 | 1 |
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
return [
txt[:a] + txt[a].upper() + txt[a + 1 :]
for a in range(len(lowerCamelCase__ ) )
if txt[a].isalpha()
]
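# Example of the comprehension above: for txt = "ab1" it yields ["Ab1", "aB1"]
# -- one copy per alphabetic position, with just that character uppercased
# (the digit position is skipped).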
if __name__ == "__main__":
__import__('''doctest''').testmod() | 686 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCamelCase :Any = re.compile(R'''\s+''')
def a ( lowerCamelCase__ ):
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(lowerCamelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = [len(lowerCamelCase__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(lowerCamelCase__ ), "line_max": max(lowerCamelCase__ )}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
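# Illustration of the streaming dedup above (hypothetical hashes): `uniques`
# is consumed as examples are filtered, so only the first occurrence of each
# hash survives.
#
#     uniques = {"h1", "h2"}
#     check_uniques({"hash": "h1"}, uniques)  # True,  uniques is now {"h2"}
#     check_uniques({"hash": "h1"}, uniques)  # False, "h1" already consumed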
def a ( lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : Tuple = ["""auto-generated""", """autogenerated""", """automatically generated"""]
A_ : Optional[int] = example["""content"""].splitlines()
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def a ( lowerCamelCase__ , lowerCamelCase__=5 , lowerCamelCase__=0.05 ):
'''simple docstring'''
A_ : Any = ["""unit tests""", """test file""", """configuration file"""]
A_ : List[str] = example["""content"""].splitlines()
A_ : str = 0
A_ : Union[str, Any] = 0
# first test
for _, line in zip(range(lowerCamelCase__ ) , lowerCamelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
A_ : List[Any] = example["""content"""].count("""\n""" )
A_ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = ["""def """, """class """, """for """, """while """]
A_ : Optional[int] = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def a ( lowerCamelCase__ , lowerCamelCase__=4 ):
'''simple docstring'''
A_ : Tuple = example["""content"""].splitlines()
A_ : int = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = tokenizer(example["""content"""] , truncation=lowerCamelCase__ )["""input_ids"""]
A_ : Optional[Any] = len(example["""content"""] ) / len(lowerCamelCase__ )
return {"ratio": ratio}
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = {}
results.update(get_hash(lowerCamelCase__ ) )
results.update(line_stats(lowerCamelCase__ ) )
results.update(alpha_stats(lowerCamelCase__ ) )
results.update(char_token_ratio(lowerCamelCase__ ) )
results.update(is_autogenerated(lowerCamelCase__ ) )
results.update(is_config_or_test(lowerCamelCase__ ) )
results.update(has_no_keywords(lowerCamelCase__ ) )
results.update(has_few_assignments(lowerCamelCase__ ) )
return results
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if not check_uniques(lowerCamelCase__ , lowerCamelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def a ( lowerCamelCase__ ):
'''simple docstring'''
with open(lowerCamelCase__ , """rb""" ) as f_in:
with gzip.open(str(lowerCamelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
os.unlink(lowerCamelCase__ )
# Settings
lowerCamelCase :Optional[int] = HfArgumentParser(PreprocessingArguments)
lowerCamelCase :Tuple = parser.parse_args()
if args.num_workers is None:
lowerCamelCase :Tuple = multiprocessing.cpu_count()
lowerCamelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCamelCase :List[Any] = time.time()
lowerCamelCase :Optional[int] = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
lowerCamelCase :int = time.time()
lowerCamelCase :List[str] = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
lowerCamelCase :int = set(ds.unique('''hash'''))
lowerCamelCase :List[str] = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
lowerCamelCase :Dict = time.time()
lowerCamelCase :int = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCamelCase :List[str] = time.time()
lowerCamelCase , lowerCamelCase :int = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
lowerCamelCase :int = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure this is the right place to save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowerCamelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowerCamelCase :Tuple = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCamelCase :List[str] = str(data_dir / F"file-{file_number+1:012}.json")
lowerCamelCase :Tuple = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 1 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
lowerCamelCase :Optional[Any] = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
lowerCamelCase :str = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
lowerCamelCase :Union[str, Any] = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
    predictions: list of predictions to score.
        Each prediction should be an integer label (a float for the \'stsb\' subset).
    references: list of references, one per prediction.
        Each reference should be an integer label (a float for the \'stsb\' subset).
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return float((preds == labels).mean() )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
A_ : Dict = float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = float(pearsonr(lowerCamelCase__ , lowerCamelCase__ )[0] )
A_ : Dict = float(spearmanr(lowerCamelCase__ , lowerCamelCase__ )[0] )
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
}
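# Toy illustration of the helpers above (the names follow the calls made in
# _compute below; arrays assume numpy imported as np): perfectly matched
# labels give accuracy/F1 of 1.0, and a perfectly linear relation gives
# Pearson and Spearman correlations of 1.0.
#
#     simple_accuracy(np.array([0, 1, 1]), np.array([0, 1, 0]))   # -> 2/3
#     pearson_and_spearman([0.0, 1.0, 2.0], [0.0, 2.0, 4.0])      # -> both 1.0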
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def _a (self , lowercase , lowercase ):
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(lowercase , lowercase )}
elif self.config_name == "stsb":
return pearson_and_spearman(lowercase , lowercase )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(lowercase , lowercase )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(lowercase , lowercase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" ) | 686 |
'''simple docstring'''
import pytest
lowerCamelCase :Optional[Any] = '''__dummy_dataset1__'''
lowerCamelCase :List[Any] = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def a ( ):
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = dataset_loading_script_name
A_ : int = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=lowerCamelCase__ )
A_ : Tuple = script_dir / f'{script_name}.py'
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return str(lowerCamelCase__ ) | 686 | 1 |
'''simple docstring'''
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : int = len(lowerCamelCase__ )
A_ : int = len(lowerCamelCase__ )
A_ : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
A_ : list = []
for char_count in range(lowerCamelCase__ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(lowerCamelCase__ )
if __name__ == "__main__":
print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''') | 686 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
lowerCamelCase :int = datasets.load_iris()
lowerCamelCase :str = np.array(data['''data'''])
lowerCamelCase :Dict = np.array(data['''target'''])
lowerCamelCase :Union[str, Any] = data['''target_names''']
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :str = train_test_split(X, y)
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
return np.linalg.norm(np.array(lowerCamelCase__ ) - np.array(lowerCamelCase__ ) )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=5 ):
'''simple docstring'''
A_ : List[str] = zip(lowerCamelCase__ , lowerCamelCase__ )
# List of distances of all points from the point to be classified
A_ : List[str] = []
for data_point in data:
A_ : Any = euclidean_distance(data_point[0] , lowerCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
A_ : Optional[Any] = [i[1] for i in sorted(lowerCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
A_ : Tuple = Counter(lowerCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 1 |
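# A hedged follow-up sketch (not in the original script): scoring the
# classifier on the held-out split produced by train_test_split above. It
# assumes the classifier is exposed as `classifier`, matching the demo call
# in the __main__ guard; `evaluate` is a name chosen here for illustration.
def evaluate(train_data, train_labels, class_names, test_data, test_labels, k=5):
    # Fraction of test points whose predicted class name matches the truth.
    correct = sum(
        classifier(train_data, train_labels, class_names, point, k) == class_names[label]
        for point, label in zip(test_data, test_labels)
    )
    return correct / len(test_labels)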
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 686 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase :List[str] = logging.get_logger(__name__)
class _lowerCAmelCase ( __UpperCAmelCase ):
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
A_ : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__(self , lowercase , lowercase , lowercase ):
if len(lowercase ) == 0 or len(lowercase ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(lowercase ) )
if isinstance(lowercase , lowercase ):
A_ : Tuple = [sequences]
A_ : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase=ZeroShotClassificationArgumentHandler() , *lowercase , **lowercase ):
A_ : int = args_parser
super().__init__(*lowercase , **lowercase )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _a (self ):
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 1 |
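# A standalone sketch (illustrative only) of the multi-label scoring performed
# in postprocess above: each label's [contradiction, entailment] logit pair is
# softmaxed independently and the entailment probability is kept. The
# max-subtraction below is a standard stabilization, not part of the pipeline
# itself.
#
#     logits = np.array([[-1.0, 2.0], [0.5, -0.5]])       # (n_labels, 2)
#     e = np.exp(logits - logits.max(-1, keepdims=True))
#     probs = e / e.sum(-1, keepdims=True)
#     scores = probs[..., 1]                               # P(entailment)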
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def a ( ):
'''simple docstring'''
A_ : Dict = 9
A_ : List[str] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
A_ : List[str] = kruskal(lowerCamelCase__ , lowerCamelCase__ )
A_ : List[Any] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
assert sorted(lowerCamelCase__ ) == sorted(lowerCamelCase__ ) | 686 |
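# Editor's note: the test above assumes a `kruskal(num_nodes, edges)` helper that returns
# the minimum-spanning-tree edges. A self-contained union-find sketch with that shape (an
# assumption about the imported module, not its actual source):
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving keeps the trees shallow
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda edge: edge[2]):  # cheapest edges first
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # keep the edge only if it joins two components
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst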
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
return 12 | 686 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase :Optional[Any] = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'convbert'
def __init__(self , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase=768 , lowercase=2 , lowercase=9 , lowercase=1 , lowercase=None , **lowercase , ):
super().__init__(
pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase , )
A_ : Optional[Any] = vocab_size
A_ : Tuple = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : str = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : int = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : Union[str, Any] = type_vocab_size
A_ : Dict = initializer_range
A_ : int = layer_norm_eps
A_ : List[Any] = embedding_size
A_ : Any = head_ratio
A_ : Optional[Any] = conv_kernel_size
A_ : Union[str, Any] = num_groups
A_ : List[Any] = classifier_dropout
class _lowerCAmelCase ( __UpperCAmelCase ):
@property
def _a (self ):
if self.task == "multiple-choice":
A_ : str = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : str = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] ) | 686 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
lowerCamelCase :int = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowerCamelCase :int = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of the recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Empirically, word error rate is often observed to follow a power-law relationship with perplexity.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
lowerCamelCase :Optional[Any] = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _a (self , lowercase=None , lowercase=None , lowercase=False ):
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
A_ : List[Any] = 0
A_ : Optional[int] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Any = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total | 686 | 1 |
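# Editor's note: a worked instance of the formula documented above, using the docstring's
# own example. Pair 1: "this is the prediction" vs. "this is the reference" gives S=1, D=0,
# I=0 over N=4 reference words. Pair 2: "there is an other sample" vs. "there is another
# one" gives S=2, I=1, D=0 over N=4. The iterative branch accumulates these counts before
# dividing once: WER = (1 + 3) / (4 + 4) = 0.5, matching the documented output.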
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCamelCase :List[str] = get_tests_dir('''fixtures''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# A mock response for an HTTP head request to emulate server down
A_ : int = mock.Mock()
A_ : Optional[Any] = 500
A_ : List[Any] = {}
A_ : Optional[int] = HTTPError
A_ : List[str] = {}
# Download this model to make sure it's in the cache.
A_ : Dict = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=lowercase ) as mock_head:
A_ : Dict = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This checks that we did call the fake head request
mock_head.assert_called()
def _a (self ):
# This test is for deprecated behavior and can be removed in v5
A_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class _lowerCAmelCase ( unittest.TestCase ):
@classmethod
def _a (cls ):
A_ : int = TOKEN
HfFolder.save_token(lowercase )
@classmethod
def _a (cls ):
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def _a (self ):
A_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(lowercase )
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
A_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase , repo_id="""test-feature-extractor""" , push_to_hub=lowercase , use_auth_token=self._token )
A_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase ) )
def _a (self ):
A_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(lowercase )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
A_ : Any = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowercase , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=lowercase , use_auth_token=self._token )
A_ : str = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowercase , getattr(lowercase , lowercase ) )
def _a (self ):
CustomFeatureExtractor.register_for_auto_class()
A_ : Optional[int] = CustomFeatureExtractor.from_pretrained(lowercase )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
A_ : int = AutoFeatureExtractor.from_pretrained(
F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=lowercase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" ) | 686 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
A_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.floataa , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
assert np.abs(image - expected_image ).max() < 2E-2 | 686 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :List[str] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'swin2sr'
__SCREAMING_SNAKE_CASE : int = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__(self , lowercase=64 , lowercase=1 , lowercase=3 , lowercase=180 , lowercase=[6, 6, 6, 6, 6, 6] , lowercase=[6, 6, 6, 6, 6, 6] , lowercase=8 , lowercase=2.0 , lowercase=True , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase="gelu" , lowercase=False , lowercase=0.02 , lowercase=1E-5 , lowercase=2 , lowercase=1.0 , lowercase="1conv" , lowercase="pixelshuffle" , **lowercase , ):
super().__init__(**lowercase )
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : Union[str, Any] = num_channels
A_ : Union[str, Any] = embed_dim
A_ : Any = depths
A_ : List[str] = len(lowercase )
A_ : Tuple = num_heads
A_ : Tuple = window_size
A_ : int = mlp_ratio
A_ : Optional[int] = qkv_bias
A_ : int = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : Union[str, Any] = drop_path_rate
A_ : Dict = hidden_act
A_ : int = use_absolute_embeddings
A_ : Dict = layer_norm_eps
A_ : Union[str, Any] = initializer_range
A_ : List[str] = upscale
A_ : List[str] = img_range
A_ : Dict = resi_connection
A_ : Dict = upsampler | 686 |
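# Editor's note: the `attribute_map` above lets generic Transformers code read
# `config.hidden_size` while this config stores the value under `embed_dim` -- an aliasing
# mechanism of the `PretrainedConfig` base class. Sketch (upstream class name assumed):
# config = Swin2SRConfig(embed_dim=180)
# assert config.hidden_size == config.embed_dim == 180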
'''simple docstring'''
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = DownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_, A_ : str = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = SkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : int = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
A_ : Optional[Any] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Any = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = UNetMidBlockaDCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
A_, A_ : Dict = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = UNetMidBlockaDSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[int] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = UpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : Union[str, Any] = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
A_, A_ : Any = super().prepare_init_args_and_inputs_for_common()
A_ : int = 32
return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Any = SkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = UpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : Tuple = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlockaD # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
super().test_output(lowercase ) | 686 | 1 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def a ( lowerCamelCase__ = 3 ):
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise TypeError("""number of qubits must be an integer.""" )
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""" )
if math.floor(lowerCamelCase__ ) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""" )
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate (>10).""" )
A_ : str = QuantumRegister(lowerCamelCase__ , """qr""" )
A_ : Optional[Any] = ClassicalRegister(lowerCamelCase__ , """cr""" )
A_ : Dict = QuantumCircuit(lowerCamelCase__ , lowerCamelCase__ )
A_ : Optional[int] = number_of_qubits
for i in range(lowerCamelCase__ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(lowerCamelCase__ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowerCamelCase__ , lowerCamelCase__ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(lowerCamelCase__ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(lowerCamelCase__ , lowerCamelCase__ )
# simulate with 10000 shots
A_ : Tuple = Aer.get_backend("""qasm_simulator""" )
A_ : Optional[int] = execute(lowerCamelCase__ , lowerCamelCase__ , shots=1_00_00 )
return job.result().get_counts(lowerCamelCase__ )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
) | 686 |
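# Editor's note: the circuit above follows the standard QFT pattern -- a Hadamard on each
# qubit, controlled phase rotations of pi / 2**k, then swaps to undo the reversed qubit
# order. As a classical cross-check, the same unitary can be written as the matrix
# F[j][k] = omega**(j*k) / sqrt(N) with omega = exp(2*pi*i/N) and N = 2**n:
import numpy as np

def qft_matrix(number_of_qubits: int) -> np.ndarray:
    dim = 2**number_of_qubits
    omega = np.exp(2j * np.pi / dim)
    j, k = np.meshgrid(np.arange(dim), np.arange(dim), indexing="ij")
    return omega ** (j * k) / np.sqrt(dim)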
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ = None ):
'''simple docstring'''
A_ : List[Any] = word_bank or []
# create a table
A_ : int = len(lowerCamelCase__ ) + 1
A_ : list[list[list[str]]] = []
for _ in range(lowerCamelCase__ ):
table.append([] )
# seed value
A_ : Any = [[]] # the empty string has exactly one construction: the empty combination
# iterate through the indices
for i in range(lowerCamelCase__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(lowerCamelCase__ )] == word:
A_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now, push those combinations to table[i + len(word)]
table[i + len(lowerCamelCase__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(lowerCamelCase__ )]:
combination.reverse()
return table[len(lowerCamelCase__ )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 686 | 1 |
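# Editor's note: `all_construct` above is bottom-up dynamic programming over prefixes of
# the target: table[i] holds every word sequence that spells target[:i], so the answer is
# read off table[len(target)]. For example, all_construct("purple", ["purp", "p", "ur",
# "le", "purpl"]) returns [["purp", "le"], ["p", "ur", "p", "le"]].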
'''simple docstring'''
from __future__ import annotations
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative in a semiconductor""" )
elif hole_conc < 0:
raise ValueError("""Hole concentration cannot be negative in a semiconductor""" )
elif intrinsic_conc < 0:
raise ValueError(
"""Intrinsic concentration cannot be negative in a semiconductor""" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 |
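# Editor's note: a worked instance of the mass-action relation n * p = n_i**2 encoded
# above. With electron_conc=25 and hole_conc=100 (intrinsic_conc left at 0 to mark it as
# the unknown), the function returns ("intrinsic_conc", 50.0), since sqrt(25 * 100) = 50.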
'''simple docstring'''
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = []
A_ : int = set({"""(""", """[""", """{"""} )
A_ : Union[str, Any] = set({""")""", """]""", """}"""} )
A_ : Tuple = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(lowerCamelCase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(lowerCamelCase__ ) == 0 or (len(lowerCamelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(lowerCamelCase__ ) == 0
def a ( ):
'''simple docstring'''
A_ : int = input("""Enter sequence of brackets: """ )
if is_balanced(lowerCamelCase__ ):
print(lowerCamelCase__ , """is balanced""" )
else:
print(lowerCamelCase__ , """is not balanced""" )
if __name__ == "__main__":
main() | 686 | 1 |
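# Editor's note: quick sanity checks for `is_balanced` above -- each closing bracket must
# match the most recently opened one (classic stack discipline):
# is_balanced("([]{})")  -> True
# is_balanced("(]")      -> False  (']' arrives while '(' is on top of the stack)
# is_balanced("(((")     -> False  (unmatched openers leave the stack non-empty)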
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase :Dict = logging.get_logger(__name__)
lowerCamelCase :Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCamelCase :Optional[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for attribute in key.split(""".""" ):
A_ : Optional[int] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
A_ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
A_ : int = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
A_ : Optional[Any] = value
elif weight_type == "weight_g":
A_ : Optional[Any] = value
elif weight_type == "weight_v":
A_ : List[str] = value
elif weight_type == "bias":
A_ : List[str] = value
elif weight_type == "running_mean":
A_ : Union[str, Any] = value
elif weight_type == "running_var":
A_ : Any = value
elif weight_type == "num_batches_tracked":
A_ : List[Any] = value
elif weight_type == "inv_freq":
A_ : Dict = value
else:
A_ : str = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = []
A_ : Union[str, Any] = fairseq_model.state_dict()
A_ : str = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
A_ : int = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == """group""" , )
A_ : int = True
else:
for key, mapped_key in MAPPING.items():
A_ : Dict = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
A_ : List[str] = True
if "*" in mapped_key:
A_ : Any = name.split(lowerCamelCase__ )[0].split(""".""" )[-2]
A_ : Any = mapped_key.replace("""*""" , lowerCamelCase__ )
if "pos_bias_u" in name:
A_ : List[str] = None
elif "pos_bias_v" in name:
A_ : Optional[int] = None
elif "weight_g" in name:
A_ : Union[str, Any] = """weight_g"""
elif "weight_v" in name:
A_ : Tuple = """weight_v"""
elif "bias" in name:
A_ : Union[str, Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ : Optional[int] = """weight"""
elif "running_mean" in name:
A_ : List[Any] = """running_mean"""
elif "inv_freq" in name:
A_ : str = """inv_freq"""
elif "running_var" in name:
A_ : str = """running_var"""
elif "num_batches_tracked" in name:
A_ : Union[str, Any] = """num_batches_tracked"""
else:
A_ : str = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = full_name.split("""conv_layers.""" )[-1]
A_ : Dict = name.split(""".""" )
A_ : int = int(items[0] )
A_ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
A_ : Tuple = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
A_ : List[Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
A_ : Tuple = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
A_ : Dict = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ):
'''simple docstring'''
if config_path is not None:
A_ : Any = WavaVecaConformerConfig.from_pretrained(lowerCamelCase__ , hidden_act="""swish""" )
else:
A_ : Union[str, Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
A_ : Optional[Any] = """rotary"""
if is_finetuned:
if dict_path:
A_ : Optional[int] = Dictionary.load(lowerCamelCase__ )
# Important: change the bos & pad token ids, since the CTC blank
# symbol is <pad> and not <s> as in fairseq.
A_ : Optional[Any] = target_dict.pad_index
A_ : List[Any] = target_dict.bos_index
A_ : List[Any] = target_dict.eos_index
A_ : Optional[Any] = len(target_dict.symbols )
A_ : int = os.path.join(lowerCamelCase__ , """vocab.json""" )
if not os.path.isdir(lowerCamelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
A_ : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
A_ : Tuple = 0
A_ : str = 1
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
A_ : Any = WavaVecaCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCamelCase__ , )
A_ : Optional[Any] = True if config.feat_extract_norm == """layer""" else False
A_ : List[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
A_ : List[str] = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
A_ : int = WavaVecaConformerForCTC(lowerCamelCase__ )
else:
A_ : str = WavaVecaConformerForPreTraining(lowerCamelCase__ )
if is_finetuned:
A_, A_, A_ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
A_ : Dict = argparse.Namespace(task="""audio_pretraining""" )
A_ : Optional[int] = fairseq.tasks.setup_task(lowerCamelCase__ )
A_, A_, A_ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCamelCase__ )
A_ : Dict = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCamelCase :Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCamelCase :Optional[int] = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 686 |
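# Editor's note: a hedged command-line invocation of the conversion script above. The
# file name is an assumption; the flags come directly from the argparse block in this
# snippet:
# python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --pytorch_dump_folder_path ./wav2vec2-conformer \
#     --config_path ./config.json \
#     --not_finetuned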
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( __UpperCAmelCase ):
def __init__(self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(
lowercase , split=lowercase , features=lowercase , cache_dir=lowercase , keep_in_memory=lowercase , streaming=lowercase , num_proc=lowercase , **lowercase , )
A_ : Optional[int] = field
A_ : Dict = path_or_paths if isinstance(lowercase , lowercase ) else {self.split: path_or_paths}
A_ : Optional[Any] = Json(
cache_dir=lowercase , data_files=lowercase , features=lowercase , field=lowercase , **lowercase , )
def _a (self ):
# Build iterable dataset
if self.streaming:
A_ : Optional[int] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : int = None
A_ : Union[str, Any] = None
A_ : int = None
A_ : List[str] = None
self.builder.download_and_prepare(
download_config=lowercase , download_mode=lowercase , verification_mode=lowercase , base_path=lowercase , num_proc=self.num_proc , )
A_ : str = self.builder.as_dataset(
split=self.split , verification_mode=lowercase , in_memory=self.keep_in_memory )
return dataset
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase , lowercase = None , lowercase = None , **lowercase , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'num_proc {num_proc} must be an integer > 0.' )
A_ : Any = dataset
A_ : List[str] = path_or_buf
A_ : List[str] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
A_ : Optional[Any] = num_proc
A_ : List[Any] = """utf-8"""
A_ : int = to_json_kwargs
def _a (self ):
A_ : Tuple = self.to_json_kwargs.pop("""path_or_buf""" , lowercase )
A_ : Tuple = self.to_json_kwargs.pop("""orient""" , """records""" )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""lines""" , True if orient == """records""" else False )
A_ : Union[str, Any] = self.to_json_kwargs.pop("""index""" , False if orient in ["""split""", """table"""] else True )
A_ : Dict = self.to_json_kwargs.pop("""compression""" , lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , """wb""" , compression=lowercase ) as buffer:
A_ : Tuple = self._write(file_obj=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'The compression parameter is not supported when writing to a buffer, but compression={compression}'
""" was passed. Please provide a local path instead.""" )
A_ : Union[str, Any] = self._write(
file_obj=self.path_or_buf , orient=lowercase , lines=lowercase , index=lowercase , **self.to_json_kwargs )
return written
def _a (self , lowercase ):
A_, A_, A_, A_, A_ : List[str] = args
A_ : List[str] = query_table(
table=self.dataset.data , key=slice(lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
A_ : Any = batch.to_pandas().to_json(
path_or_buf=lowercase , orient=lowercase , lines=lowercase , index=lowercase , **lowercase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a (self , lowercase , lowercase , lowercase , lowercase , **lowercase , ):
A_ : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
A_ : Optional[int] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowercase )
else:
A_, A_ : Tuple = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , lowercase , lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="""ba""" , disable=not logging.is_progress_bar_enabled() , desc="""Creating json from Arrow format""" , ):
written += file_obj.write(lowercase )
return written | 686 | 1 |
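# Editor's note: the reader/writer pair above is what backs `Dataset.from_json(...)` and
# `Dataset.to_json(...)` in the `datasets` library. A minimal round trip, assuming the
# library is installed:
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("rows.jsonl", lines=True, orient="records")  # one JSON object per line
reloaded = Dataset.from_json("rows.jsonl")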
'''simple docstring'''
def a ( lowerCamelCase__ = 3 , lowerCamelCase__ = 7 , lowerCamelCase__ = 1_00_00_00 ):
'''simple docstring'''
A_ : List[str] = 0
A_ : Any = 1
for current_denominator in range(1 , limit + 1 ):
A_ : List[Any] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
A_ : Union[str, Any] = current_numerator
A_ : Optional[int] = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0)) | 686 |
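# Editor's note: the loop above scans every denominator d <= limit, takes the largest
# numerator with numerator/d strictly below 3/7 (stepping back one when d is an exact
# multiple of 7), and keeps the best fraction via the cross-multiplication test. Small
# worked check: with limit=8 the best fraction below 3/7 is 2/5, so solution(3, 7, 8) == 2.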
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) | 686 | 1 |
'''simple docstring'''
def topological_sort(graph):
    '''simple docstring'''
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("""Cycle exists""")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
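# Illustrative check added here (not in the original file): Kahn's algorithm
# detects cycles because no vertex of a cycle ever reaches indegree 0, so cnt
# stays below len(graph).
# cyclic_graph = {0: [1], 1: [2], 2: [0]}
# topological_sort(cyclic_graph)  # prints "Cycle exists"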
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_longt5'''] = [
'''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongT5EncoderModel''',
'''LongT5ForConditionalGeneration''',
'''LongT5Model''',
'''LongT5PreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_longt5'''] = [
'''FlaxLongT5ForConditionalGeneration''',
'''FlaxLongT5Model''',
'''FlaxLongT5PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
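# Illustrative note (import path assumed): with the lazy module installed as
# above, `from transformers.models.longt5 import LongT5Model` resolves the name
# through _import_structure, so the torch-backed module is only imported on
# first access.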
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path):
'''simple docstring'''
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
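# Hypothetical invocation (the script filename is assumed; --check_lib is the
# flag defined above):
#   python check_build.py              # inspects build/lib/transformers
#   python check_build.py --check_lib  # inspects the installed package instead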
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speecht5 import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class TextToSpeechTool(PipelineTool):
    default_checkpoint = 'microsoft/speecht5_tts'
    description = (
        'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
        'text to read (in English) and returns a waveform object containing the sound.'
    )
    name = 'text_reader'
    pre_processor_class = SpeechT5Processor
    model_class = SpeechT5ForTextToSpeech
    post_processor_class = SpeechT5HifiGan
    inputs = ['text']
    outputs = ['audio']

    def setup(self):
        if self.post_processor is None:
            self.post_processor = """microsoft/speecht5_hifigan"""
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        inputs = self.pre_processor(text=text, return_tensors="""pt""", truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""")
            embeddings_dataset = load_dataset("""Matthijs/cmu-arctic-xvectors""", split="""validation""")
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]["""xvector"""]).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
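# Minimal usage sketch (assumes an agents-style runtime where PipelineTool's
# __call__ chains encode -> forward -> decode; not part of the original file):
# tool = TextToSpeechTool()
# tool.setup()
# waveform = tool("Hello world")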
'''simple docstring'''
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days, absent, late):
    '''simple docstring'''
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days=30):
    '''simple docstring'''
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
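# Sanity check added for illustration: with a single day every status is
# allowed, so solution(1) should be 3 — the recursion returns
# _calculate(0, 0, 1) + _calculate(0, 1, 0) + _calculate(0, 0, 0) = 1 + 1 + 1.
# assert solution(1) == 3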
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = 'sequence-classification'
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["""scheduler"""]
        tensorboard_logs = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""", cached_features_file)
            else:
                logger.info("""Creating features from dataset file at %s""", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == """dev"""
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info("""Saving features into cached file %s""", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False):
        mode = """dev""" if mode == """test""" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("""Loading features from cached file %s""", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )
    def validation_step(self, batch, batch_nb):
        inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs):
        val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["""pred"""] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["""target"""] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["""log"""] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs):
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            """--max_seq_length""", default=128, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ), )
        parser.add_argument(
            """--task""", default="""""", type=str, required=True, help="""The GLUE task to run""", )
        parser.add_argument(
            """--gpus""", default=0, type=int, help="""The number of GPUs allocated for this, it is by default 0 meaning none""", )
        parser.add_argument(
            """--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""" )
        return parser
def main():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            """./results""", f'{args.task}_{time.strftime("%Y%m%d_%H%M%S")}', )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
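# Hypothetical invocation (script name and the generic flags such as
# --model_name_or_path and --do_train come from lightning_base.add_generic_args
# and are assumed here; --task/--gpus/--max_seq_length are defined above):
#   python run_pl_glue.py --task mrpc --model_name_or_path bert-base-cased \
#       --gpus 1 --max_seq_length 128 --do_train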
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer, last_epoch=-1):
    '''simple docstring'''
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    '''simple docstring'''
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(""",""")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(""":""")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    '''simple docstring'''
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    '''simple docstring'''
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    '''simple docstring'''
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1E-7, power=1.0, last_epoch=-1):
    '''simple docstring'''
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})')

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name, optimizer, step_rules=None, num_warmup_steps=None, num_training_steps=None, num_cycles=1, power=1.0, last_epoch=-1, ):
    '''simple docstring'''
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.')
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.')
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
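# Minimal usage sketch (the toy model and step counts are illustrative, not
# from the original file):
# import torch
# optimizer = torch.optim.AdamW(torch.nn.Linear(2, 2).parameters(), lr=1e-3)
# lr_scheduler = get_scheduler(
#     "linear", optimizer, num_warmup_steps=100, num_training_steps=1000
# )
# for _ in range(1000):
#     optimizer.step()
#     lr_scheduler.step()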
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    '''simple docstring'''
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("""float division by zero, could not find root""")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    '''simple docstring'''
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
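# Verification note (added here; easy to check by substitution): the real root
# of x**3 - 2*x - 5 lies near 2.09455, so the secant iteration above should
# print a value within its 1e-5 tolerance of that root.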
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('''transformers.models.encodec''')
MAPPING_QUANTIZER = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
MAPPING_ENCODER = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
MAPPING_ENCODER_48K = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
MAPPING_DECODER = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
MAPPING_DECODER_48K = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    '''simple docstring'''
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    '''simple docstring'''
    for key in ignore_keys:
        if key.endswith(""".*"""):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(""".*.""")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(orig_dict, hf_model, model_name):
    '''simple docstring'''
    unused_weights = []
    # membership test used here so the encodec_48khz branch is reachable
    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}')
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored')
            continue
        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(""".*.""")
                if prefix in name and suffix in name:
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("""embed""") and name.endswith("""embed_avg"""):
                    continue
                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(""".""")[-2]
                    mapped_key = mapped_key.replace("""*""", layer_index)
                if "weight_g" in name:
                    weight_type = """weight_g"""
                elif "weight_v" in name:
                    weight_type = """weight_v"""
                elif "weight_ih_l0" in name:
                    weight_type = """weight_ih_l0"""
                elif "weight_hh_l0" in name:
                    weight_type = """weight_hh_l0"""
                elif "bias_ih_l0" in name:
                    weight_type = """bias_ih_l0"""
                elif "bias_hh_l0" in name:
                    weight_type = """bias_hh_l0"""
                elif "weight_ih_l1" in name:
                    weight_type = """weight_ih_l1"""
                elif "weight_hh_l1" in name:
                    weight_type = """weight_hh_l1"""
                elif "bias_ih_l1" in name:
                    weight_type = """bias_ih_l1"""
                elif "bias_hh_l1" in name:
                    weight_type = """bias_hh_l1"""
                elif "bias" in name:
                    weight_type = """bias"""
                elif "weight" in name:
                    weight_type = """weight"""
                elif "running_mean" in name:
                    weight_type = """running_mean"""
                elif "running_var" in name:
                    weight_type = """running_var"""
                elif "num_batches_tracked" in name:
                    weight_type = """num_batches_tracked"""
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
@torch.no_grad()
def convert_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    '''simple docstring'''
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = """time_group_norm"""
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}')
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["""best_state"""]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("""Pushing to the hub...""")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file, eval_file, test_file, tokenizer, label_column_id, max_seq_length=None, ):
    '''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("""csv""", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="""max_length"""), batched=True, )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding="""max_length""", ), batched=True, )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={'help': 'Which column contains the label'})
    train_file: str = field(default=None, metadata={'help': 'The path of the training file'})
    dev_file: Optional[str] = field(default=None, metadata={'help': 'The path of the development file'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'The path of the test file'})
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    use_fast: bool = field(default=False, metadata={'help': 'Set this flag to use fast tokenization.'})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
def main():
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            """ --overwrite_output_dir to overcome.""")
    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, )
    logger.info(
        f'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, '
        f'16-bits training: {training_args.fp16}')
    logger.info(f'Training/evaluation parameters {training_args}')
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(label2id), label2id=label2id, id2label={id: label for label, id in label2id.items()}, finetuning_task="""text-classification""", cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool(""".bin""" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, """eval_results.txt""")
        with open(output_eval_file, """w""") as writer:
            logger.info("""***** Eval results *****""")
            for key, value in result.items():
                logger.info(f' {key} = {value}')
                writer.write(f'{key} = {value}\n')
            results.update(result)
    return results
if __name__ == "__main__":
    main()
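# Hypothetical invocation (CSV layout and script name assumed; the flags map to
# the dataclasses and TFTrainingArguments above):
#   python run_tf_text_classification.py --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --model_name_or_path bert-base-uncased \
#       --output_dir ./text_clf_model --do_train --do_eval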
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/beit-base-patch16-224-pt22k''': (
        '''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = 'beit'

    def __init__(self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self):
        return 1E-4
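# Quick usage sketch (values follow the defaults in the signature above):
# config = BeitConfig()
# assert config.hidden_size == 768
# assert config.out_indices == [3, 5, 7, 11]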
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(self, do_resize = True, size = None, crop_pct = None, resample = PILImageResampling.BILINEAR, do_rescale = True, rescale_factor = 1 / 255, do_normalize = True, image_mean = None, image_std = None, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"""shortest_edge""": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, crop_pct, resample = PILImageResampling.BICUBIC, data_format = None, **kwargs, ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}')
        shortest_edge = size["""shortest_edge"""]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format = None, **kwargs, ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format = None, **kwargs, ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize = None, size = None, crop_pct = None, resample = None, do_rescale = None, rescale_factor = None, do_normalize = None, image_mean = None, image_std = None, return_tensors = None, data_format = ChannelDimension.FIRST, **kwargs, ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("""crop_pct must be specified if size < 384.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
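# Small usage sketch (the random image is illustrative, not from the original
# file):
# import numpy as np
# processor = ConvNextImageProcessor()
# image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
# batch = processor.preprocess(image, return_tensors="np")
# # the default {"shortest_edge": 384} takes the warp branch of resize(), so
# # batch["pixel_values"] should have shape (1, 3, 384, 384)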
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_8_0_0_0,
'''sample_size''': 1_3_1_0_7_2,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_6_0_0_0,
'''sample_size''': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha, sigma):
    '''simple docstring'''
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    '''simple docstring'''
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    pass
class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    '''simple docstring'''
    url = MODELS_MAP[model_name]["""url"""]
    os.system(f'wget {url} ./')
    return f'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
UP_NUM_TO_LAYER = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
MID_NUM_TO_LAYER = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
DEPTH_0_TO_LAYER = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
RES_CONV_MAP = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
ATTN_MAP = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    '''simple docstring'''
    if name.startswith("""skip"""):
        return name.replace("""skip""", RES_CONV_MAP["""skip"""])
    # name has to be of format main.{digit}
    if not name.startswith("""main."""):
        raise ValueError(f'ResConvBlock error with {name}')
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def a ( lowerCamelCase__ ):
'''simple docstring'''
for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
raise ValueError(f'Attn error with {name}' )
def rename(input_string , max_depth=13 ):
    '''simple docstring'''
    string = input_string
    if string.split(""".""" )[0] == "timestep_embed":
        return string.replace("""timestep_embed""" , """time_proj""" )
    depth = 0
    if string.startswith("""net.3.""" ):
        depth += 1
        string = string[6:]
    elif string.startswith("""net.""" ):
        string = string[4:]
    while string.startswith("""main.7.""" ):
        depth += 1
        string = string[7:]
    if string.startswith("""main.""" ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = """mid_block"""
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num ) > 3 else """down_blocks.0"""
    if not string_left.startswith(""".""" ):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + """.""" + new_layer + """.""" + string_left
    else:
        new_string = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
    return new_string
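# Added example (derived from the mappings above, not from the original script):
# `rename` maps k-diffusion checkpoint keys onto diffusers keys, e.g.
#   rename("timestep_embed.weight") -> "time_proj.weight"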
def rename_orig_weights(state_dict ):
    '''simple docstring'''
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("""kernel""" ):
            # up- and downsample layers don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict , new_k , v ):
    '''simple docstring'''
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        tripled_shape = v.shape[0]
        single_shape = tripled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
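# Minimal sketch (added) of the qkv split performed above, assuming a fused
# Conv1d weight of shape (3*C, C, 1); the function name is illustrative.
def _demo_split_qkv(qkv_weight ):
    single = qkv_weight.shape[0] // 3
    return [qkv_weight[i * single : (i + 1) * single, :, 0] for i in range(3 )]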
def main(args ):
    '''simple docstring'''
    device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
    model_name = args.model_path.split("""/""" )[-1].split(""".""" )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]["""sample_rate"""]
    sample_size = MODELS_MAP[model_name]["""sample_size"""]
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )["""state_dict"""] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith("""kernel""" ) for k in list(diffusers_minus_renamed ) ), f'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print("""Diff sum""" , diff_sum )
    print("""Diff max""" , diff_max )
    assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
    print(f'Conversion for {model_name} successful!' )
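# Example invocation (added; the script filename and paths are illustrative):
#   python convert_dance_diffusion_to_diffusers.py --model_path gwf-440k --checkpoint_path ./gwf-440k-diffusers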
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
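# Caveat (added): argparse's `type=bool` treats any non-empty string (including
# "False") as truthy, so `--save False` still evaluates to True.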
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
args = parser.parse_args()
main(args) | 686 | 1 |
'''simple docstring'''
def or_gate(input_a , input_b ):
    '''simple docstring'''
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate():
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1)) | 686 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes , trials , prob ):
    '''simple docstring'''
    if successes > trials:
        raise ValueError("""successes must be lower than or equal to trials""" )
    if trials < 0 or successes < 0:
        raise ValueError("""the function is defined for non-negative integers""" )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError("""the function is defined for non-negative integers""" )
    if not 0 < prob < 1:
        raise ValueError("""prob has to be in the range (0, 1)""" )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
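# Worked example (added): binomial_distribution(2, 4, 0.75)
#   = C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375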
if __name__ == "__main__":
from doctest import testmod
testmod()
    print('''Probability of 2 successes out of 4 trials''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75)) | 686 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase ):
@slow
def _a (self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
A_ : Optional[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Any = AutoModel.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def _a (self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
A_ : List[str] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[str] = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Union[str, Any] = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def _a (self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[str] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase )
A_, A_ : str = TFAutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase )
A_, A_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def _a (self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : List[str] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : str = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def _a (self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase )
A_, A_ : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase )
A_, A_ : Optional[Any] = AutoModelForMaskedLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def _a (self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Tuple = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase )
A_, A_ : int = TFAutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Tuple = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase )
A_, A_ : Dict = AutoModelForSeqaSeqLM.from_pretrained(
lowercase , output_loading_info=lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def _a (self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
A_ : List[Any] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : Dict = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
@slow
def _a (self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
A_ : Optional[int] = AutoConfig.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
A_ : int = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsNotNone(lowercase )
self.assertIsInstance(lowercase , lowercase )
def _a (self ):
A_ : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
A_ : List[str] = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
def _a (self ):
A_ : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 )
A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual(model.num_parameters() , 14410 )
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 14410 ) | 686 |
'''simple docstring'''
import re
def dna(dna ):
    '''simple docstring'''
    if len(re.findall("""[ATCG]""" , dna ) ) != len(dna ):
        raise ValueError("""Invalid Strand""" )
    return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) )
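# Added example: the complement of "ATCG" is "TAGC"; any input containing a
# character outside {A, T, C, G} raises ValueError("Invalid Strand").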
if __name__ == "__main__":
import doctest
doctest.testmod() | 686 | 1 |
'''simple docstring'''
def is_palindrome(head ):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None # detach the first half (the comparison below also works without this)
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def is_palindrome_stack(head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def is_palindrome_dict(head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True | 686 |
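# Usage sketch (added): the implementations above assume a singly linked node
# with `.val` and `.next` attributes; the helpers below are illustrative.
class _Node:
    def __init__(self , val , nxt=None ):
        self.val , self.next = val , nxt
def _from_list(values ):
    head = None
    for v in reversed(values ):
        head = _Node(v , head )
    return head
# is_palindrome(_from_list([1, 2, 2, 1])) evaluates to True; [1, 2, 3] gives False.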
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request("""GET""" , """https://huggingface.co""" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("""GET""" , """https://huggingface.co""" , timeout=1.0 )
@pytest.mark.integration
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("""GET""" , """https://huggingface.co""" )
def a ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head("""https://huggingface.co""" ) | 686 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
def __init__(self , lowercase = None ):
if components is None:
A_ : Tuple = []
A_ : List[Any] = list(lowercase )
def __len__(self ):
return len(self.__components )
def __str__(self ):
return "(" + ",".join(map(lowercase , self.__components ) ) + ")"
def __add__(self , lowercase ):
A_ : Dict = len(self )
if size == len(lowercase ):
A_ : List[Any] = [self.__components[i] + other.component(lowercase ) for i in range(lowercase )]
return Vector(lowercase )
else:
raise Exception("""must have the same size""" )
def __sub__(self , lowercase ):
A_ : Any = len(self )
if size == len(lowercase ):
A_ : List[Any] = [self.__components[i] - other.component(lowercase ) for i in range(lowercase )]
return Vector(lowercase )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__(self , lowercase ):
...
@overload
def __mul__(self , lowercase ):
...
def __mul__(self , lowercase ):
if isinstance(lowercase , (float, int) ):
A_ : Dict = [c * other for c in self.__components]
return Vector(lowercase )
elif isinstance(lowercase , lowercase ) and len(self ) == len(lowercase ):
A_ : Optional[int] = len(self )
A_ : int = [self.__components[i] * other.component(lowercase ) for i in range(lowercase )]
return sum(lowercase )
else: # error case
raise Exception("""invalid operand!""" )
def _a (self ):
return Vector(self.__components )
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def _a (self , lowercase , lowercase ):
assert -len(self.__components ) <= pos < len(self.__components )
A_ : List[str] = value
def _a (self ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
A_ : int = [c**2 for c in self.__components]
return math.sqrt(sum(lowercase ) )
def _a (self , lowercase , lowercase = False ):
A_ : Dict = self * other
A_ : Optional[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension ):
    '''simple docstring'''
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector(dimension , pos ):
    '''simple docstring'''
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )
def axpy(scalar , x , y ):
    '''simple docstring'''
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
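# Worked example (added): axpy(2, Vector([1, 2]), Vector([3, 4])) computes
# 2 * (1, 2) + (3, 4) = (5, 8).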
def random_vector(n , a , b ):
    '''simple docstring'''
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )
class Matrix:
def __init__(self , lowercase , lowercase , lowercase ):
A_ : int = matrix
A_ : List[str] = w
A_ : Optional[Any] = h
def __str__(self ):
A_ : Union[str, Any] = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__(self , lowercase ):
if self.__width == other.width() and self.__height == other.height():
A_ : Optional[Any] = []
for i in range(self.__height ):
A_ : int = [
self.__matrix[i][j] + other.component(lowercase , lowercase )
for j in range(self.__width )
]
matrix.append(lowercase )
return Matrix(lowercase , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__(self , lowercase ):
if self.__width == other.width() and self.__height == other.height():
A_ : Tuple = []
for i in range(self.__height ):
A_ : List[str] = [
self.__matrix[i][j] - other.component(lowercase , lowercase )
for j in range(self.__width )
]
matrix.append(lowercase )
return Matrix(lowercase , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__(self , lowercase ):
...
@overload
def __mul__(self , lowercase ):
...
def __mul__(self , lowercase ):
if isinstance(lowercase , lowercase ): # matrix-vector
if len(lowercase ) == self.__width:
A_ : Optional[Any] = zero_vector(self.__height )
for i in range(self.__height ):
A_ : int = [
self.__matrix[i][j] * other.component(lowercase )
for j in range(self.__width )
]
ans.change_component(lowercase , sum(lowercase ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(lowercase , (int, float) ): # matrix-scalar
A_ : List[str] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowercase , self.__width , self.__height )
return None
def _a (self ):
return self.__height
def _a (self ):
return self.__width
def _a (self , lowercase , lowercase ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def _a (self , lowercase , lowercase , lowercase ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A_ : Optional[Any] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def _a (self , lowercase , lowercase ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
A_ : Optional[int] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowercase ) ):
A_ : Optional[Any] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowercase , self.__width - 1 , self.__height - 1 ).determinant()
def _a (self , lowercase , lowercase ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowercase , lowercase )
else:
raise Exception("""Indices out of bounds""" )
def _a (self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
A_ : List[Any] = [
self.__matrix[0][y] * self.cofactor(0 , lowercase ) for y in range(self.__width )
]
return sum(lowercase )
def square_zero_matrix(n ):
    '''simple docstring'''
    ans: list[list[float]] = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )
def random_matrix(w , h , a , b ):
    '''simple docstring'''
    random.seed(None )
    matrix: list[list[float]] = [
        [random.randint(a , b ) for _ in range(w )] for _ in range(h )
    ]
    return Matrix(matrix , w , h ) | 686 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(R'''\s+''')
def get_hash(example ):
    '''simple docstring'''
    return {"hash": hashlib.md5(re.sub(PATTERN , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def line_stats(example ):
    '''simple docstring'''
    line_lengths = [len(line ) for line in example["""content"""].splitlines()]
    return {"line_mean": np.mean(line_lengths ), "line_max": max(line_lengths )}
def alpha_stats(example ):
    '''simple docstring'''
    alpha_frac = np.mean([c.isalnum() for c in example["""content"""]] )
    return {"alpha_frac": alpha_frac}
def check_uniques(example , uniques ):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def is_autogenerated(example , scan_width=5 ):
    '''simple docstring'''
    keywords = ["""auto-generated""", """autogenerated""", """automatically generated"""]
    lines = example["""content"""].splitlines()
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test(example , scan_width=5 , coeff=0.05 ):
    '''simple docstring'''
    keywords = ["""unit tests""", """test file""", """configuration file"""]
    lines = example["""content"""].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width ) , lines ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
    nlines = example["""content"""].count("""\n""" )
    threshold = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords(example ):
    '''simple docstring'''
    keywords = ["""def """, """class """, """for """, """while """]
    lines = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments(example , minimum=4 ):
    '''simple docstring'''
    lines = example["""content"""].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio(example ):
    '''simple docstring'''
    input_ids = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    ratio = len(example["""content"""] ) / len(input_ids )
return {"ratio": ratio}
def preprocess(example ):
    '''simple docstring'''
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def filter(example , uniques , args ):
    '''simple docstring'''
    if not check_uniques(example , uniques ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path ):
    '''simple docstring'''
    with open(file_path , """rb""" ) as f_in:
        with gzip.open(str(file_path ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
            shutil.copyfileobj(f_in , f_out )
    os.unlink(file_path )
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F"Time to load dataset: {time.time()-t_start:.2f}")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F"Time to preprocess dataset: {time.time()-t_start:.2f}")
# Deduplicate hashes
uniques = set(ds.unique('''hash'''))
frac = len(uniques) / len(ds)
print(F"Fraction of duplicates: {1-frac:.2%}")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F"Time to filter dataset: {time.time()-t_start:.2f}")
print(F"Size of filtered dataset: {len(ds_filter)}")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}")
print(F"Size of deduplicate dataset: {len(ds_filter)}")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"Time to save dataset: {time.time()-t_start:.2f}") | 686 | 1 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph ):
    '''simple docstring'''
    visited = [False] * len(graph )
    color = [-1] * len(graph )
    def dfs(v , c ):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u , 1 - c )
    for i in range(len(graph ) ):
        if not visited[i]:
            dfs(i , 0 )
    for i in range(len(graph ) ):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph)) | 686 |
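# Added counterexample: a triangle {0: [1, 2], 1: [0, 2], 2: [0, 1]} contains an
# odd cycle, so check_bipartite_dfs returns False for it.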
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '''__dummy_dataset1__'''
DATASET_LOADING_SCRIPT_CODE = '''
import json
import os
import datasets
REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"I-PER",
"B-ORG",
"I-ORG",
"B-LOC",
"I-LOC",
]
)
),
"langs": datasets.Sequence(datasets.Value("string")),
"spans": datasets.Sequence(datasets.Value("string")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
]
def _generate_examples(self, filepath):
with open(filepath, "r", encoding="utf-8") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
'''
@pytest.fixture
def dataset_loading_script_name():
'''simple docstring'''
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
'''simple docstring'''
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name , dataset_loading_script_code , tmp_path ):
    '''simple docstring'''
    script_name = dataset_loading_script_name
    script_dir = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=True )
    script_path = script_dir / f'{script_name}.py'
    with open(script_path , """w""" ) as f:
        f.write(dataset_loading_script_code )
    return str(script_path ) | 686 | 1 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum ):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer , last_epoch = -1 ):
    '''simple docstring'''
    return LambdaLR(optimizer , lambda _: 1 , last_epoch=last_epoch )
def get_constant_schedule_with_warmup(optimizer , num_warmup_steps , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1.0 , num_warmup_steps ) )
        return 1.0
    return LambdaLR(optimizer , lr_lambda , last_epoch=last_epoch )
def get_piecewise_constant_schedule(optimizer , step_rules , last_epoch = -1 ):
    '''simple docstring'''
    rules_dict = {}
    rule_list = step_rules.split(""",""" )
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(""":""" )
        steps = int(value_str )
        value = float(value )
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1] )
    def create_rules_function(rules_dict , last_lr_multiple ):
        def rule_func(steps ) -> float:
            sorted_steps = sorted(rules_dict.keys() )
            for i, sorted_step in enumerate(sorted_steps ):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict , last_lr_multiple )
    return LambdaLR(optimizer , rules_func , last_epoch=last_epoch )
def get_linear_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , last_epoch=-1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        return max(
            0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
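# Worked example (added): with num_warmup_steps=10 and num_training_steps=100,
# the multiplier above is step/10 during warmup, then decays linearly from 1.0
# at step 10 to 0.0 at step 100 (e.g. (100 - 55) / (100 - 10) = 0.5 at step 55).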
def get_cosine_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 0.5 , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(num_cycles ) * 2.0 * progress )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , num_cycles = 1 , last_epoch = -1 ):
    '''simple docstring'''
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        progress = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
        if progress >= 1.0:
            return 0.0
        return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles ) * progress) % 1.0) )) )
    return LambdaLR(optimizer , lr_lambda , last_epoch )
def get_polynomial_decay_schedule_with_warmup(optimizer , num_warmup_steps , num_training_steps , lr_end=1E-7 , power=1.0 , last_epoch=-1 ):
    '''simple docstring'''
    lr_init = optimizer.defaults["""lr"""]
    if not (lr_init > lr_end):
        raise ValueError(f'lr_end ({lr_end}) must be smaller than the initial lr ({lr_init})' )
    def lr_lambda(current_step ):
        if current_step < num_warmup_steps:
            return float(current_step ) / float(max(1 , num_warmup_steps ) )
        elif current_step > num_training_steps:
            return lr_end / lr_init # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer , lr_lambda , last_epoch )
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name , optimizer , step_rules = None , num_warmup_steps = None , num_training_steps = None , num_cycles = 1 , power = 1.0 , last_epoch = -1 , ):
    '''simple docstring'''
    name = SchedulerType(name )
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer , last_epoch=last_epoch )
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer , step_rules=step_rules , last_epoch=last_epoch )
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer , num_warmup_steps=num_warmup_steps , last_epoch=last_epoch )
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , num_cycles=num_cycles , last_epoch=last_epoch , )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , power=power , last_epoch=last_epoch , )
    return schedule_func(
        optimizer , num_warmup_steps=num_warmup_steps , num_training_steps=num_training_steps , last_epoch=last_epoch ) | 686 |
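# Hedged usage sketch (added; the optimizer, model, and step counts are illustrative):
#   from torch.optim import AdamW
#   optimizer = AdamW(model.parameters(), lr=5e-5)
#   lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)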
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['''data'''])
y = np.array(data['''target'''])
classes = data['''target_names''']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a , b ):
    '''simple docstring'''
    return np.linalg.norm(np.array(a ) - np.array(b ) )
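# Added example: euclidean_distance([0, 0], [3, 4]) == 5.0 (the 3-4-5 triangle).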
def classifier(train_data , train_target , classes , point , k=5 ):
    '''simple docstring'''
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 686 | 1 |
'''simple docstring'''
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
def __init__(self , lowercase , lowercase=99 , lowercase=13 , lowercase=16 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=False , lowercase=True , lowercase=2 , lowercase=32 , lowercase=4 , lowercase=4 , lowercase=30 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=None , ):
A_ : Optional[Any] = parent
A_ : Tuple = batch_size
A_ : Optional[int] = decoder_seq_length
# For common tests
A_ : List[Any] = self.decoder_seq_length
A_ : int = is_training
A_ : Union[str, Any] = use_attention_mask
A_ : Any = use_labels
A_ : List[Any] = vocab_size
A_ : str = d_model
A_ : Dict = d_model
A_ : Any = decoder_layers
A_ : Dict = decoder_layers
A_ : Optional[int] = decoder_ffn_dim
A_ : Dict = decoder_attention_heads
A_ : Tuple = decoder_attention_heads
A_ : Tuple = eos_token_id
A_ : int = bos_token_id
A_ : List[str] = pad_token_id
A_ : int = decoder_start_token_id
A_ : List[str] = use_cache
A_ : List[Any] = max_position_embeddings
A_ : Optional[Any] = None
A_ : str = decoder_seq_length
A_ : int = 2
A_ : Optional[Any] = 1
def _a (self ):
A_ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
A_ : Union[str, Any] = None
if self.use_attention_mask:
A_ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
A_ : Any = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
A_ : List[str] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def _a (self , lowercase , lowercase , lowercase , lowercase , ):
A_ : Optional[Any] = True
A_ : Union[str, Any] = TrOCRDecoder(config=lowercase ).to(lowercase ).eval()
A_ : Dict = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
A_ : str = model(lowercase , use_cache=lowercase )
A_ : Dict = model(lowercase )
A_ : List[Any] = model(lowercase , use_cache=lowercase )
self.parent.assertTrue(len(lowercase ) == len(lowercase ) )
self.parent.assertTrue(len(lowercase ) == len(lowercase ) + 1 )
A_ : Tuple = outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
A_ : Tuple = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
A_ : List[str] = torch.cat([input_ids, next_tokens] , dim=-1 )
A_ : Optional[int] = model(lowercase )["""last_hidden_state"""]
A_ : int = model(lowercase , past_key_values=lowercase )["""last_hidden_state"""]
# select random slice
A_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A_ : Optional[Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
A_ : Optional[Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowercase , lowercase , atol=1E-3 )
def _a (self ):
A_ : Dict = self.prepare_config_and_inputs()
A_, A_, A_, A_ : int = config_and_inputs
A_ : Dict = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self ):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False )
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig )
def _a (self ):
pass
def _a (self ):
pass
def _a (self ):
pass
def _a (self ):
self.config_tester.run_common_tests()
def _a (self ):
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowercase )
def _a (self ):
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def _a (self ):
pass | 686 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler ):
    def _parse_labels(self , labels ):
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(""",""" ) if label.strip()]
        return labels
    def __call__(self , sequences , labels , hypothesis_template ):
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError("""You must include at least one label and at least one sequence.""" )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    """The provided hypothesis_template \"{}\" could not be formatted with the target labels. """
                    """Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotClassificationPipeline(ChunkPipeline ):
    def __init__(self , args_parser=ZeroShotClassificationArgumentHandler() , *args , **kwargs ):
        self._args_parser = args_parser
        super().__init__(*args , **kwargs )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
    def entailment_id(self ):
        for label, ind in self.model.config.label2id.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _a (self , lowercase , lowercase=True , lowercase=True , lowercase=TruncationStrategy.ONLY_FIRST , **lowercase ):
A_ : Any = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
A_ : str = self.tokenizer.eos_token
try:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=lowercase , )
except Exception as e:
if "too short" in str(lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
A_ : Any = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_tensors=lowercase , padding=lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _a (self , **lowercase ):
if kwargs.get("""multi_class""" , lowercase ) is not None:
A_ : Tuple = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
A_ : Optional[Any] = {}
if "candidate_labels" in kwargs:
A_ : str = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
A_ : List[str] = kwargs["""hypothesis_template"""]
A_ : List[Any] = {}
if "multi_label" in kwargs:
A_ : Optional[Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase , ):
if len(lowercase ) == 0:
pass
elif len(lowercase ) == 1 and "candidate_labels" not in kwargs:
A_ : Union[str, Any] = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowercase , **lowercase )
def _a (self , lowercase , lowercase=None , lowercase="This example is {}." ):
A_, A_ : int = self._args_parser(lowercase , lowercase , lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowercase , lowercase ) ):
A_ : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowercase ) - 1,
**model_input,
}
def _a (self , lowercase ):
A_ : Optional[Any] = inputs["""candidate_label"""]
A_ : List[Any] = inputs["""sequence"""]
A_ : List[str] = {k: inputs[k] for k in self.tokenizer.model_input_names}
A_ : List[str] = self.model(**lowercase )
A_ : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _a (self , lowercase , lowercase=False ):
A_ : Any = [outputs["""candidate_label"""] for outputs in model_outputs]
A_ : str = [outputs["""sequence"""] for outputs in model_outputs]
A_ : Dict = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
A_ : Dict = logits.shape[0]
A_ : Any = len(lowercase )
A_ : List[str] = N // n
A_ : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
A_ : Union[str, Any] = self.entailment_id
A_ : Any = -1 if entailment_id == 0 else 0
A_ : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
A_ : Union[str, Any] = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
A_ : Optional[int] = reshaped_outputs[..., self.entailment_id]
A_ : int = np.exp(lowercase ) / np.exp(lowercase ).sum(-1 , keepdims=lowercase )
A_ : Any = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
} | 686 | 1 |
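# Hedged usage sketch (added; the checkpoint name is illustrative):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("one day I will see the world", candidate_labels=["travel", "cooking", "dancing"])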
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase :Dict = {
'''google/pegasus-large''': '''https://huggingface.co/google/pegasus-large/resolve/main/config.json''',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig ):
    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__(self , lowercase=50265 , lowercase=1024 , lowercase=12 , lowercase=4096 , lowercase=16 , lowercase=12 , lowercase=4096 , lowercase=16 , lowercase=0.0 , lowercase=0.0 , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=1024 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=0 , lowercase=False , lowercase=0 , lowercase=1 , lowercase=1 , **lowercase , ):
A_ : Any = vocab_size
A_ : List[Any] = max_position_embeddings
A_ : Optional[int] = d_model
A_ : List[str] = encoder_ffn_dim
A_ : str = encoder_layers
A_ : Optional[int] = encoder_attention_heads
A_ : Union[str, Any] = decoder_ffn_dim
A_ : Tuple = decoder_layers
A_ : str = decoder_attention_heads
A_ : Union[str, Any] = dropout
A_ : int = attention_dropout
A_ : Dict = activation_dropout
A_ : str = activation_function
A_ : List[str] = init_std
A_ : List[str] = encoder_layerdrop
A_ : List[str] = decoder_layerdrop
A_ : str = use_cache
A_ : Dict = encoder_layers
A_ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowercase , eos_token_id=lowercase , is_encoder_decoder=lowercase , decoder_start_token_id=lowercase , forced_eos_token_id=lowercase , **lowercase , )
@property
def _a (self ):
return self.encoder_attention_heads
@property
def _a (self ):
        return self.d_model
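# Usage sketch, assuming the class above mirrors transformers' PegasusConfig:
# the `attribute_map` makes `num_attention_heads` and `hidden_size` aliases for
# `encoder_attention_heads` and `d_model` respectively.
from transformers import PegasusConfig

config = PegasusConfig(encoder_attention_heads=8)
assert config.num_attention_heads == 8  # resolved through attribute_map
assert config.hidden_size == config.d_model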
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase :int = logging.get_logger(__name__)
lowerCamelCase :Tuple = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : int = 'yolos'
def __init__(self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1E-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ):
super().__init__(**lowercase )
A_ : List[Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Any = num_attention_heads
A_ : Any = intermediate_size
A_ : int = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : List[Any] = attention_probs_dropout_prob
A_ : List[str] = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : List[str] = image_size
A_ : str = patch_size
A_ : int = num_channels
A_ : Optional[int] = qkv_bias
A_ : List[Any] = num_detection_tokens
A_ : Tuple = use_mid_position_embeddings
A_ : int = auxiliary_loss
# Hungarian matcher
A_ : int = class_cost
A_ : List[Any] = bbox_cost
A_ : Optional[int] = giou_cost
# Loss coefficients
A_ : Any = bbox_loss_coefficient
A_ : List[Any] = giou_loss_coefficient
A_ : str = eos_coefficient
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : str = version.parse('1.11' )
@property
def _a (self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _a (self ):
return 1E-4
@property
def _a (self ):
        return 12
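# Sketch of how the ONNX config is typically consumed, assuming the transformers
# YolosConfig / YolosOnnxConfig pairing; exact import paths may differ by version.
from transformers import YolosConfig
from transformers.models.yolos.configuration_yolos import YolosOnnxConfig

onnx_config = YolosOnnxConfig(YolosConfig())
print(onnx_config.inputs)               # dynamic axes for pixel_values
print(onnx_config.atol_for_validation)  # 1e-4, as defined above
print(onnx_config.default_onnx_opset)   # 12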
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = '''pt'''
elif is_tf_available():
    FRAMEWORK = '''tf'''
else:
    FRAMEWORK = '''jax'''
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = PerceiverTokenizer
__SCREAMING_SNAKE_CASE : List[Any] = False
def _a (self ):
super().setUp()
A_ : Optional[Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _a (self ):
return PerceiverTokenizer.from_pretrained("""deepmind/language-perceiver""" )
def _a (self , **lowercase ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
def _a (self , lowercase , lowercase=False , lowercase=20 , lowercase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
A_ : Optional[Any] = []
for i in range(len(lowercase ) ):
try:
A_ : str = tokenizer.decode([i] , clean_up_tokenization_spaces=lowercase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
A_ : Tuple = list(filter(lambda lowercase : re.match(R"""^[ a-zA-Z]+$""" , t[1] ) , lowercase ) )
A_ : Dict = list(filter(lambda lowercase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowercase ) , lowercase ) )
if max_length is not None and len(lowercase ) > max_length:
A_ : Union[str, Any] = toks[:max_length]
if min_length is not None and len(lowercase ) < min_length and len(lowercase ) > 0:
while len(lowercase ) < min_length:
A_ : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
A_ : List[Any] = [t[0] for t in toks]
# Ensure consistency
A_ : Optional[int] = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase )
if " " not in output_txt and len(lowercase ) > 1:
A_ : Optional[Any] = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowercase )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowercase )
)
if with_prefix_space:
A_ : str = """ """ + output_txt
A_ : Dict = tokenizer.encode(lowercase , add_special_tokens=lowercase )
return output_txt, output_ids
def _a (self ):
A_ : Any = self.perceiver_tokenizer
A_ : Optional[Any] = """Unicode €."""
A_ : str = tokenizer(lowercase )
A_ : Any = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["""input_ids"""] , lowercase )
# decoding
A_ : int = tokenizer.decode(lowercase )
self.assertEqual(lowercase , """[CLS]Unicode €.[SEP]""" )
A_ : Optional[Any] = tokenizer("""e è é ê ë""" )
A_ : str = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["""input_ids"""] , lowercase )
# decoding
A_ : int = tokenizer.decode(lowercase )
self.assertEqual(lowercase , """[CLS]e è é ê ë[SEP]""" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """[CLS]e è é ê ë[SEP]""" )
def _a (self ):
A_ : Dict = self.perceiver_tokenizer
A_ : Optional[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
A_ : List[Any] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
A_ : str = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
self.assertIsInstance(lowercase , lowercase )
if FRAMEWORK != "jax":
A_ : Dict = list(batch.input_ids.numpy()[0] )
else:
A_ : Union[str, Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def _a (self ):
A_ : List[str] = self.perceiver_tokenizer
A_ : Optional[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
A_ : Dict = tokenizer(lowercase , padding=lowercase , return_tensors=lowercase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("""input_ids""" , lowercase )
self.assertIn("""attention_mask""" , lowercase )
self.assertNotIn("""decoder_input_ids""" , lowercase )
self.assertNotIn("""decoder_attention_mask""" , lowercase )
def _a (self ):
A_ : str = self.perceiver_tokenizer
A_ : int = [
"""Summary of the text.""",
"""Another summary.""",
]
A_ : Any = tokenizer(
text_target=lowercase , max_length=32 , padding="""max_length""" , truncation=lowercase , return_tensors=lowercase )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def _a (self ):
# safety check on max_len default value so we are sure the test works
A_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
A_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
A_ : List[str] = tempfile.mkdtemp()
A_ : int = """ He is very happy, UNwant\u00E9d,running"""
A_ : Union[str, Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
A_ : Tuple = tokenizer.__class__.from_pretrained(lowercase )
A_ : Optional[Any] = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
A_ : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
A_ : List[str] = tempfile.mkdtemp()
A_ : int = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(["""bim""", """bambam"""] )
A_ : Optional[int] = tokenizer.additional_special_tokens
additional_special_tokens.append("""new_additional_special_token""" )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
A_ : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
A_ : Dict = tokenizer.__class__.from_pretrained(lowercase )
A_ : int = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
A_ : Tuple = tokenizer.__class__.from_pretrained(lowercase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowercase )
def _a (self ):
A_ : Optional[Any] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
A_ : Any = json.load(lowercase )
with open(os.path.join(lowercase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
A_ : str = json.load(lowercase )
A_ : Dict = [F'<extra_id_{i}>' for i in range(125 )]
A_ : List[str] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
A_ : Optional[int] = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(lowercase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
A_ : Any = tokenizer_class.from_pretrained(
lowercase , )
self.assertIn(
"""an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
A_ : Dict = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=lowercase )]
A_ : str = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , )
self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens )
self.assertEqual(
["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , )
def _a (self ):
A_ : Dict = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , """�""" )
def _a (self ):
pass
def _a (self ):
pass
def _a (self ):
pass
def _a (self ):
pass
def _a (self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
A_ : List[str] = self.get_tokenizers(fast=lowercase , do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A_ : Dict = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""]
A_ : List[str] = tokenizer.convert_tokens_to_string(lowercase )
                self.assertIsInstance(lowercase , lowercase )
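# Quick sketch of the byte-level scheme these tests exercise; it requires
# downloading the hub checkpoint "deepmind/language-perceiver".
from transformers import PerceiverTokenizer

tok = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
ids = tok("Unicode €.").input_ids
print(ids)              # [4, 91, 116, ...] — raw UTF-8 bytes offset by the special tokens
print(tok.decode(ids))  # "[CLS]Unicode €.[SEP]"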
'''simple docstring'''
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def _a (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
def _a (self , lowercase=None , lowercase=None , lowercase=False ):
if concatenate_texts:
return compute_measures(lowercase , lowercase )["wer"]
else:
A_ : List[Any] = 0
A_ : Optional[int] = 0
for prediction, reference in zip(lowercase , lowercase ):
A_ : Any = compute_measures(lowercase , lowercase )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
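# The same measure computed directly with jiwer, which the metric above wraps;
# the sentences are illustrative.
from jiwer import compute_measures

m = compute_measures("this is the reference", "this is the prediction")
wer = (m["substitutions"] + m["deletions"] + m["insertions"]) / (
    m["substitutions"] + m["deletions"] + m["hits"]
)  # (S + D + I) / (S + D + C)
assert abs(wer - m["wer"]) < 1e-9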
'''simple docstring'''
from collections import defaultdict
def dfs ( start ):
    '''simple docstring'''
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    # an edge above an even-sized subtree can be removed
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree ( ):
    '''simple docstring'''
    dfs(1 )
if __name__ == "__main__":
    n , m = 1_0, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)  # 2 for the sample tree; the root always lands in cuts, hence the -1
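# A minimal iterative cross-check of the recursive count above; `subtree_sizes`
# is a hypothetical helper, not part of the original solution.
def subtree_sizes(tree, root):
    order, parent = [], {root: None}
    stack = [root]
    while stack:  # iterative DFS recording a preorder
        node = stack.pop()
        order.append(node)
        for nxt in tree[node]:
            if nxt != parent[node]:
                parent[nxt] = node
                stack.append(nxt)
    sizes = {node: 1 for node in order}
    for node in reversed(order):  # accumulate child sizes bottom-up
        if parent[node] is not None:
            sizes[parent[node]] += sizes[node]
    return sizes
# sum(1 for node, s in subtree_sizes(tree, 1).items() if node != 1 and s % 2 == 0) == 2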
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : List[Any] = CycleDiffusionPipeline
__SCREAMING_SNAKE_CASE : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'negative_prompt',
'height',
'width',
'negative_prompt_embeds',
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'latents'}
__SCREAMING_SNAKE_CASE : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
__SCREAMING_SNAKE_CASE : Union[str, Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a (self ):
torch.manual_seed(0 )
        A_ : Optional[int] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
A_ : Union[str, Any] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , num_train_timesteps=1000 , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0 )
A_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
A_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
A_ : int = CLIPTextModel(lowercase )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _a (self , lowercase , lowercase=0 ):
A_ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A_ : int = image / 2 + 0.5
if str(lowercase ).startswith("""mps""" ):
A_ : int = torch.manual_seed(lowercase )
else:
A_ : Union[str, Any] = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : Union[str, Any] = {
"""prompt""": """An astronaut riding an elephant""",
"""source_prompt""": """An astronaut riding a horse""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""eta""": 0.1,
"""strength""": 0.8,
"""guidance_scale""": 3,
"""source_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _a (self ):
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.get_dummy_components()
A_ : Any = CycleDiffusionPipeline(**lowercase )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = self.get_dummy_inputs(lowercase )
A_ : str = pipe(**lowercase )
A_ : str = output.images
A_ : Dict = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Tuple = np.array([0.44_59, 0.49_43, 0.45_44, 0.66_43, 0.54_74, 0.43_27, 0.57_01, 0.59_59, 0.51_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a (self ):
A_ : Dict = self.get_dummy_components()
for name, module in components.items():
if hasattr(lowercase , """half""" ):
A_ : List[str] = module.half()
A_ : List[Any] = CycleDiffusionPipeline(**lowercase )
A_ : Optional[Any] = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Any = self.get_dummy_inputs(lowercase )
A_ : Tuple = pipe(**lowercase )
A_ : List[str] = output.images
A_ : Union[str, Any] = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
A_ : Optional[int] = np.array([0.35_06, 0.45_43, 0.4_46, 0.45_75, 0.51_95, 0.41_55, 0.52_73, 0.5_18, 0.41_16] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _a (self ):
return super().test_save_load_local()
@unittest.skip("""non-deterministic pipeline""" )
def _a (self ):
return super().test_inference_batch_single_identical()
@skip_mps
def _a (self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a (self ):
return super().test_save_load_optional_components()
@skip_mps
def _a (self ):
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy""" )
A_ : List[str] = init_image.resize((512, 512) )
A_ : Dict = """CompVis/stable-diffusion-v1-4"""
A_ : List[Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : Any = CycleDiffusionPipeline.from_pretrained(
            lowercase , scheduler=lowercase , safety_checker=lowercase , torch_dtype=torch.float16 , revision="""fp16""" )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : str = """A black colored car"""
A_ : Dict = """A blue colored car"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : Optional[int] = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : str = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5E-1
def _a (self ):
A_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/cycle-diffusion/black_colored_car.png""" )
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy""" )
A_ : Optional[int] = init_image.resize((512, 512) )
A_ : Optional[int] = """CompVis/stable-diffusion-v1-4"""
A_ : Union[str, Any] = DDIMScheduler.from_pretrained(lowercase , subfolder="""scheduler""" )
A_ : List[str] = CycleDiffusionPipeline.from_pretrained(lowercase , scheduler=lowercase , safety_checker=lowercase )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
pipe.enable_attention_slicing()
A_ : Optional[Any] = """A black colored car"""
A_ : int = """A blue colored car"""
A_ : str = torch.manual_seed(0 )
A_ : Any = pipe(
prompt=lowercase , source_prompt=lowercase , image=lowercase , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=lowercase , output_type="""np""" , )
A_ : int = output.images
        assert np.abs(image - expected_image ).max() < 2E-2
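# Condensed usage sketch mirroring the slow tests above; it needs a GPU and the
# CompVis/stable-diffusion-v1-4 weights, and the prompt/strength values are the
# ones exercised by the tests rather than recommended defaults.
import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))
scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
).to("cuda")
output = pipe(
    prompt="A blue colored car",
    source_prompt="A black colored car",
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
)
blue_car = output.images[0]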
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def a ( ):
'''simple docstring'''
    n = 10
A_ : Any = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
    A_ : List[str] = datasets.Dataset.from_dict(
        {
            """tokens""": [["""foo"""] * 5] * n,
            """labels""": [[1] * 5] * n,
            """answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
            """id""": list(range(n ) ),
} , features=lowerCamelCase__ , )
return dataset
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowerCamelCase__ )
return filename
# FILE_CONTENT + files
FILE_CONTENT = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
A_ : str = FILE_CONTENT
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return filename
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
    import bz2
A_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
A_ : List[Any] = bytes(lowerCamelCase__ , """utf-8""" )
    with bz2.open(lowerCamelCase__ , """wb""" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
import gzip
A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
A_ : List[Any] = bytes(lowerCamelCase__ , """utf-8""" )
with gzip.open(lowerCamelCase__ , """wb""" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
A_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
A_ : int = bytes(lowerCamelCase__ , """utf-8""" )
        with lz4.frame.open(lowerCamelCase__ , """wb""" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
A_ : Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with py7zr.SevenZipFile(lowerCamelCase__ , """w""" ) as archive:
archive.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
import tarfile
A_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowerCamelCase__ , """w""" ) as f:
f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
import lzma
A_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
A_ : Optional[int] = bytes(lowerCamelCase__ , """utf-8""" )
with lzma.open(lowerCamelCase__ , """wb""" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
import zipfile
A_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
A_ : int = bytes(lowerCamelCase__ , """utf-8""" )
with zstd.open(lowerCamelCase__ , """wb""" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
A_ : List[str] = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ )
return filename
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def a ( ):
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = datasets.Dataset.from_dict(lowerCamelCase__ )
A_ : List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(lowerCamelCase__ ) ) as con:
A_ : int = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowerCamelCase__ , """w""" , newline="""""" ) as f:
A_ : List[Any] = csv.DictWriter(lowerCamelCase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowerCamelCase__ , """w""" , newline="""""" ) as f:
A_ : Dict = csv.DictWriter(lowerCamelCase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
    import bz2
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowerCamelCase__ , """rb""" ) as f:
A_ : Any = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(lowerCamelCase__ , """wb""" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase__ ) ) )
f.write(lowerCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : str = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
    A_ : List[Any] = pa.schema(
        {
            """col_1""": pa.string(),
            """col_2""": pa.int64(),
            """col_3""": pa.float64(),
        } )
with open(lowerCamelCase__ , """wb""" ) as f:
A_ : List[str] = pq.ParquetWriter(lowerCamelCase__ , schema=lowerCamelCase__ )
A_ : Optional[Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowerCamelCase__ ) )] for k in DATA[0]} , schema=lowerCamelCase__ )
writer.write_table(lowerCamelCase__ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
A_ : Union[str, Any] = {"""data""": DATA}
with open(lowerCamelCase__ , """w""" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
A_ : Optional[int] = {"""data""": DATA_DICT_OF_LISTS}
with open(lowerCamelCase__ , """w""" ) as f:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowerCamelCase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowerCamelCase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowerCamelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowerCamelCase__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowerCamelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowerCamelCase__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowerCamelCase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
import gzip
A_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowerCamelCase__ , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
import gzip
A_ : Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowerCamelCase__ , """rb""" ) as orig_file:
with gzip.open(lowerCamelCase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase__ ) ) )
f.write(lowerCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase__ , """w""" ) as f:
f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.add(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowerCamelCase__ , """w""" ) as f:
f.add(lowerCamelCase__ , arcname=os.path.join("""nested""" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Dict = ["""0""", """1""", """2""", """3"""]
A_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowerCamelCase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = ["""0""", """1""", """2""", """3"""]
A_ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowerCamelCase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""0""", """1""", """2""", """3"""]
A_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowerCamelCase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase__ ) ) )
f.write(lowerCamelCase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowerCamelCase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : int = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowerCamelCase__ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
A_ : int = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowerCamelCase__ )
return path
@pytest.fixture(scope="""session""" )
def a ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def a ( ):
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowerCamelCase__ , """w""" ) as f:
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ) )
f.write(lowerCamelCase__ , arcname=os.path.basename(lowerCamelCase__ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 10 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 10 )
    return data_dir
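# Usage sketch: how a test module in this package would consume one of the
# session fixtures above, assuming the fixtures carry their canonical names
# (e.g. `csv_path`); the test itself is hypothetical.
def test_reads_generated_csv(csv_path):
    import csv

    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    assert [row["col_1"] for row in rows] == ["0", "1", "2", "3"]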
'''simple docstring'''
import unittest
from diffusers.models.unet_2d_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Any = DownBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'down'
def _a (self ):
A_ : Dict = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Dict = ResnetDownsampleBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'down'
def _a (self ):
A_ : Optional[int] = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = AttnDownBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
A_ : int = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Dict = CrossAttnDownBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
def _a (self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Union[str, Any] = SimpleCrossAttnDownBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : int = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[str] = SkipDownBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : Any = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[str] = AttnSkipDownBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_skip_sample=lowercase )
def _a (self ):
A_ : int = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Tuple = DownEncoderBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[Any] = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[str] = AttnDownEncoderBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Optional[Any] = 'down'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
        init_dict = {
            """in_channels""": 32,
            """out_channels""": 32,
        }
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Tuple = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = UNetMidBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'mid'
def _a (self ):
        init_dict = {
            """in_channels""": 32,
            """temb_channels""": 128,
        }
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : Optional[int] = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Any = UNetMidBlock2DCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : Optional[int] = 'mid'
def _a (self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : str = UNetMidBlock2DSimpleCrossAttn # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'mid'
@property
def _a (self ):
return super().get_dummy_input(include_encoder_hidden_states=lowercase )
def _a (self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : List[Any] = UpBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Union[str, Any] = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Optional[int] = ResnetUpsampleBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : Optional[Any] = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Optional[Any] = CrossAttnUpBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Any = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def _a (self ):
A_ : Union[str, Any] = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Dict = SimpleCrossAttnUpBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase , include_encoder_hidden_states=lowercase )
def _a (self ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def _a (self ):
A_ : Any = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Optional[int] = AttnUpBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : List[str] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _a (self ):
A_ : str = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Any = SkipUpBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Tuple = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Tuple = AttnSkipUpBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : List[Any] = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_res_hidden_states_tuple=lowercase )
def _a (self ):
A_ : str = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : int = UpDecoderBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : str = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
        init_dict = {"""in_channels""": 32, """out_channels""": 32}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : str = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
super().test_output(lowercase )
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
    __SCREAMING_SNAKE_CASE : Tuple = AttnUpDecoderBlock2D # noqa F405
__SCREAMING_SNAKE_CASE : Dict = 'up'
@property
def _a (self ):
return super().get_dummy_input(include_temb=lowercase )
def _a (self ):
        init_dict = {"""in_channels""": 32, """out_channels""": 32}
        inputs_dict = self.dummy_input
return init_dict, inputs_dict
def _a (self ):
A_ : List[str] = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(lowercase )
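# Standalone sketch of exercising one block directly, assuming diffusers'
# DownBlock2D signature; the shapes follow the dummy inputs used by the mixin.
import torch
from diffusers.models.unet_2d_blocks import DownBlock2D

block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
sample = torch.randn(4, 32, 32, 32)  # (batch, channels, height, width)
temb = torch.randn(4, 128)           # timestep embedding
hidden_states, res_samples = block(sample, temb)
print(hidden_states.shape)           # spatial dims halved by the downsampler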
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = tmp_path / """cache"""
A_ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[int] = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = tmp_path / """cache"""
A_ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Optional[Any] = features.copy() if features else default_expected_features
A_ : Optional[int] = (
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Dict = ParquetDatasetReader(lowerCamelCase__ , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = tmp_path / """cache"""
A_ : Any = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Any = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ , split=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if issubclass(lowerCamelCase__ , lowerCamelCase__ ):
A_ : List[str] = parquet_path
elif issubclass(lowerCamelCase__ , lowerCamelCase__ ):
A_ : int = [parquet_path]
A_ : List[Any] = tmp_path / """cache"""
A_ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : str = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = tmp_path / """cache"""
A_ : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Union[str, Any] = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if split:
A_ : Tuple = {split: parquet_path}
else:
A_ : List[Any] = """train"""
A_ : str = {"""train""": parquet_path, """test""": parquet_path}
A_ : List[Any] = tmp_path / """cache"""
A_ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Optional[Any] = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
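# Note: ``get_writer_batch_size`` picks a smaller Parquet row-group size for
# media-heavy features (images, audio) so that readers can fetch a few rows
# without decoding huge row groups; plain scalar features fall back to the
# library default, hence the ``None`` case above.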
'''simple docstring'''
from __future__ import annotations
def all_construct(target, word_bank=None):
    """Return every ordered combination of words from ``word_bank`` that concatenates to ``target``."""
    word_bank = word_bank or []
    # create a table with one slot per prefix of ``target`` (including the empty prefix)
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
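# Complexity note: building the table costs O(len(target) * total word-bank
# length) slice comparisons, but the number of stored combinations (and hence
# the copying work) can grow exponentially for ambiguous banks, e.g.
# all_construct("aaaa", ["a", "aa"]).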
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
) | 686 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
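# A minimal usage sketch (illustrative; assumes a SentencePiece model file is
# available locally -- "spiece.model" below is a placeholder path):
#
#     tokenizer = BertGenerationTokenizer(vocab_file="spiece.model")
#     encoding = tokenizer("Hello world")
#     print(tokenizer.decode(encoding["input_ids"]))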
'''simple docstring'''
def is_balanced(s):
    """Return True if every bracket in ``s`` is closed in the correct order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main() | 686 | 1 |
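# Examples (illustrative):
#     is_balanced("([]{})")  -> True
#     is_balanced("([)]")    -> False
#     is_balanced("(((")     -> False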
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
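# A minimal usage sketch (defaults mirror the __init__ signature above):
#
#     config = LiltConfig(channel_shrink_ratio=4)
#     print(config.max_2d_position_embeddings)  # 1024 by default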
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths,
        split=None,
        features=None,
        cache_dir=None,
        keep_in_memory=False,
        streaming=False,
        field=None,
        num_proc=None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset,
        path_or_buf,
        batch_size=None,
        num_proc=None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self):
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(self, file_obj, orient, lines, index, **to_json_kwargs):
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)
        return written
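# A minimal round-trip sketch (illustrative; ``ds`` stands in for any
# ``datasets.Dataset`` instance):
#
#     JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()
#     reloaded = JsonDatasetReader("out.jsonl").read()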
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
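# These tests can be run in isolation with pytest, e.g.
#     pytest test_image_processing_efficientformer.py -k "call"
# (the module path above is illustrative and depends on the repository layout).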
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)