import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
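# --- Illustrative usage sketch (added; not part of the original test file). ---
# The criteria exercised above are normally passed straight to `generate`. The
# checkpoint name is only an example; running this downloads model weights.
#
# from transformers import AutoModelForCausalLM, AutoTokenizer
# from transformers.generation import MaxLengthCriteria, StoppingCriteriaList
#
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# model = AutoModelForCausalLM.from_pretrained("gpt2")
# inputs = tokenizer("Hello", return_tensors="pt")
# output_ids = model.generate(
#     **inputs,
#     stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)]),
# )
# print(tokenizer.decode(output_ids[0]))
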
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """Pipeline for text-to-text generation using a seq2seq model."""

    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input lengths."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """Summarize news articles and other documents."""

    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """Translates from one language to another."""

    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
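# --- Illustrative usage sketch (added). Running it downloads model weights;
# "t5-small" is just an example checkpoint. ---
#
# from transformers import pipeline
#
# generator = pipeline("text2text-generation", model="t5-small")
# print(generator("translate English to German: The house is wonderful."))
# # [{'generated_text': 'Das Haus ist wunderbar.'}]
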
"""Tokenization class for Blenderbot."""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to unicode strings, avoiding byte values
    (whitespace/control characters) that the BPE code would choke on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (represented as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot tokenizer, derived from the GPT-2 byte-level BPE tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
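# --- Illustrative usage sketch (added; downloads tokenizer files when run). ---
#
# from transformers import BlenderbotTokenizer
#
# tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
# ids = tokenizer(" Hello, how are you?").input_ids
# print(tokenizer.decode(ids))
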
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.04_44},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.0_21},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.01_67},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.01_32},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.00_53},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.99_67},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_93},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.99_09},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.98_79},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.98_34},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.97_16},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.96_12},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.95_99},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.95_52},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.95_32},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.95_16},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.94_99},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.94_83},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.94_64},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_43},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.9_43},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.94_08},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.93_35},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.93_26},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.92_62},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.89_99},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.89_86},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.89_84},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.88_73},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_8_0, 6_4_0)}, """scores""": 0.88_71}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.04_44},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.02_10},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.01_67},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.01_32},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_8_0, 6_4_0)}, """scores""": 1.00_53},
] , )
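# --- Illustrative usage sketch (added; downloads SAM weights when run). ---
#
# from transformers import pipeline
#
# generator = pipeline("mask-generation", model="facebook/sam-vit-base", points_per_batch=64)
# outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg")
# print(len(outputs["masks"]), outputs["scores"][:3])
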
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
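# Example invocation (added; all paths are placeholders):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /path/to/rembert/model.ckpt \
#     --rembert_config_file /path/to/rembert_config.json \
#     --pytorch_dump_path /path/to/pytorch_model.bin
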
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attribute expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the terminal cursor for the duration of the block."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
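# --- Illustrative usage (added): ---
#
# import time
# with hide():
#     time.sleep(1.0)  # the terminal cursor is hidden while this block runs
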
from ..utils import DummyObject, requires_backends
# NOTE: the identifier obfuscation in this dump renamed every class in this file
# to the same token, so the original class names are not recoverable from the
# text itself. The names below are reconstructed from diffusers'
# `utils/dummy_flax_objects.py` and may not match this exact snapshot.


class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxModelMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxUNet2DConditionModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxAutoencoderKL(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDiffusionPipeline(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDIMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDDPMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxDPMSolverMultistepScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxKarrasVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxLMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxPNDMScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxSchedulerMixin(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class FlaxScoreSdeVeScheduler(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity (intersection over union) of two collections."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
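    # Additional illustrative checks (added): exercise the list/tuple branch and
    # the non-deduplicating "alternative" union.
    print(jaccard_similarity(list(set_a), tuple(set_b)))  # 0.375
    print(jaccard_similarity(set_a, set_b, alternative_union=True))  # 3 / 11 = 0.2727...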
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a: list) -> None:
    """Sort the list `a` in place using pigeonhole sort (integers only)."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))
if __name__ == "__main__":
main()
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive fast modular exponentiation: base**exponent % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: last `digits` digits of the hyperexponentiation of base by height."""
    # tetration is right-associative: repeatedly exponentiate modulo 10**digits
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
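    # Sanity check (added): _modexpt should agree with Python's built-in
    # three-argument pow for modular exponentiation.
    assert _modexpt(3, 19, 10**8) == pow(3, 19, 10**8)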
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
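# --- How the lazy indirection above behaves (added sketch): ---
#
# import transformers.models.longformer as longformer  # cheap; nothing heavy imported yet
# config_cls = longformer.LongformerConfig             # first attribute access triggers the
#                                                      # real import of configuration_longformer
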
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every achievable total, the number of ways to roll it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Project Euler 205: probability that Peter (nine 4-sided dice) beats Colin (six 6-sided dice)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9

    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)

    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
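    # Quick sanity check (added): a single 6-sided die has exactly one way to
    # roll each total from 1 to 6.
    assert total_frequency_distribution(sides_number=6, dice_number=1)[1:] == [1] * 6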
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    """
    Wrapper class for timm models to be used as backbones. This enables using the timm models interchangeably
    with the other models in the library keeping the same API.
    """

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """Empty init weights function to ensure compatibility of the class in the library."""
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
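# --- Illustrative usage sketch (added; downloads timm weights when run): ---
#
# import torch
# from transformers import TimmBackbone, TimmBackboneConfig
#
# config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
# backbone = TimmBackbone(config)
# feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
# print([fm.shape for fm in feature_maps])
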
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
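# --- Illustrative usage sketch (added; downloads several GB of weights when run): ---
#
# from diffusers import VersatileDiffusionPipeline
#
# pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
# image = pipe.text_to_image("an astronaut riding a horse on mars").images[0]
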
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
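# For context, a minimal sketch of the `send_file` function exercised above.
# The real implementation lives in file_transfer/send_file.py; the calls below
# mirror what the test asserts, while the port and buffer size are assumptions:
#
# import socket
#
# def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
#     port = 12312  # hypothetical port
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     sock.bind(("localhost", port))
#     sock.listen(1)
#     conn, _addr = sock.accept()
#     conn.recv(1024)  # wait for the client's request
#     with open(filename, "rb") as in_file:
#         data = in_file.read(1024)
#         while data:
#             conn.send(data)
#             data = in_file.read(1024)
#     conn.close()
#     sock.shutdown(socket.SHUT_RDWR)
#     sock.close()
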
import os


def solution():
    """Project Euler 11: greatest product of four adjacent numbers in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Stores two signals and performs their circular convolution via a matrix method."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the matrix is the second signal rotated by the row index
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
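    # FFT cross-check (added): circular convolution can equivalently be computed
    # with FFTs; this prints the same values as circular_convolution().
    cc = CircularConvolution()
    fft_result = np.real(np.fft.ifft(np.fft.fft(cc.first_signal) * np.fft.fft(cc.second_signal)))
    print([round(float(x), 2) for x in fft_result])  # [10.0, 10.0, 6.0, 14.0]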
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list using the cocktail shaker (bidirectional bubble) sort algorithm."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
from datetime import datetime
import matplotlib.pyplot as plt
import torch
# NOTE: the obfuscated source reused a single name for all four helpers below;
# the descriptive names here are editorial guesses reconstructed from each body.
def freeze_params(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = """cuda""" if torch.cuda.is_available() else """cpu"""
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = """mps"""
    if device == "mps":
        print(
            """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"""
            """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"""
            """ with generations.""" )
    return device


def show_image(img):
    fig = plt.imshow(img )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("""%H:%M:%S""" )
    return timestamp
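# Minimal usage sketch for the helpers above (function names are the editorial
# choices noted in the comment at the top of this block):
if __name__ == "__main__":
    print(f"[{get_timestamp()}] running on {get_device()}")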
| 721 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
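# Editorial toy example (not from the source): the id packing performed by
# _convert_token_to_id and __call__ above, shown with made-up vocabularies.
# Real ids come from artists.json, genres.json and lyrics.json.
artists_encoder = {"metallica": 1}
genres_encoder = {"rock": 1, "metal": 2}
lyrics_encoder = {"h": 1, "i": 2}
artist_id = artists_encoder.get("metallica", 0)
genre_ids = [genres_encoder.get(g, 0) for g in ["rock", "metal"]] + [-1] * 3  # pad to n_genres=5
lyric_ids = [lyrics_encoder.get(c, 0) for c in "hi"]
full_tokens = [artist_id] + genre_ids + lyric_ids  # -> [1, 1, 2, -1, -1, -1, 1, 2]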
| 684 | 0 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCamelCase : Optional[int] = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
    parser.add_argument(
        """--dataset_name""" ,type=str ,default="""wikitext""" ,help="""Name of the training. Explore datasets at: hf.co/datasets.""" ,)
    parser.add_argument(
        """--dataset_config""" ,type=str ,default="""wikitext-103-raw-v1""" ,help="""Configuration name of the dataset.""" )
    parser.add_argument(
        """--tokenizer_name_or_path""" ,type=str ,default="""sayakpaul/unigram-tokenizer-wikitext""" ,help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" ,)
    parser.add_argument(
        """--shard_size""" ,type=int ,default=1000 ,help="""Number of entries to go in a single shard.""" ,)
    parser.add_argument("""--split""" ,type=str ,default="""train""" ,choices=["""train""", """test""", """validation"""] )
    parser.add_argument(
        """--limit""" ,default=None ,type=int ,help="""Limit the number of shards (used for debugging).""" ,)
    parser.add_argument(
        """--max_length""" ,type=int ,default=512 ,help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
        """ sequence length that is a multiple of 8.""" ,)
    parser.add_argument(
        """--output_dir""" ,default="""tf-tpu""" ,type=str ,help="""Output directory where the TFRecord shards will be saved. If the"""
        """ path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
        """ shards will be directly saved to a Google Cloud Storage bucket.""" ,)
    args = parser.parse_args()
    return args
def tokenize_function( tokenizer ):
    def fn(examples ):
        return tokenizer(examples["""text"""] )

    return fn
def get_serialized_examples( tokenized_data ) -> list:
    records = []
    for i in range(len(tokenized_data["""input_ids"""] ) ):
        features = {
            """input_ids""": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["""input_ids"""][i] ) ),
            """attention_mask""": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["""attention_mask"""][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
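# Editorial companion sketch (not in the original script): parsing the shards
# back. The feature spec mirrors get_serialized_examples above; `max_length`
# must match the value used at write time.
def get_parse_fn(max_length=512):
    feature_spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], dtype=tf.int64),
    }

    def parse(example_proto):
        return tf.io.parse_single_example(example_proto, feature_spec)

    return parse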
def main( args ):
    dataset = datasets.load_dataset(args.dataset_name ,args.dataset_config ,split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) ,args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f"""Limiting the dataset to {args.limit} entries.""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir ,args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir ,args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn ,batched=True ,num_proc=4 ,remove_columns=["""text"""] )

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] ,[] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 ,total_length ,args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts ,batched=True ,batch_size=1000 ,num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 ,len(grouped_dataset ) ,args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["""input_ids"""] )
        filename = os.path.join(split_dir ,f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
        print("""Wrote file {} containing {} records""".format(filename ,records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f"""split-{args.split}-records-count.txt""" ,"""w""" ) as f:
        print(f"""Total {args.split} records: {total_records}""" ,file=f )
if __name__ == "__main__":
args = parse_args()
main(args)
| 700 |
def naive_pattern_search( s ,pattern ) -> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
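# The scan above is O(len(s) * len(pattern)) in the worst case. For long inputs a
# linear-time matcher such as Knuth-Morris-Pratt avoids re-comparing matched
# prefixes; editorial sketch (not part of the original file) of its failure table:
def kmp_failure_table(pattern):
    fail = [0] * len(pattern)
    k = 0
    for i in range(1, len(pattern)):
        while k > 0 and pattern[i] != pattern[k]:
            k = fail[k - 1]  # fall back to the longest proper border so far
        if pattern[i] == pattern[k]:
            k += 1
        fail[i] = k
    return fail

# kmp_failure_table("ABCABD") -> [0, 0, 0, 1, 2, 0]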
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 684 | 0 |
from math import isqrt
def is_prime( number ) -> bool:
    return all(number % divisor != 0 for divisor in range(2 ,isqrt(number ) + 1 ) )
def solution( max_prime = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
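# Editorial note on the stride: consecutive-cube differences are
# (n + 1)**3 - n**3 = 3*n*n + 3*n + 1, i.e. 7, 19, 37, 61, ... for n = 1, 2, 3, ...
# Each step grows by 6 * (n + 1), which is exactly the
# `prime_candidate += 6 * cube_index` update above (7 + 6*2 = 19, 19 + 6*3 = 37).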
if __name__ == "__main__":
print(f"""{solution() = }""")
| 701 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( vector ) -> np.array:
    return (2 / (1 + np.exp(-2 * vector ))) - 1
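# Identity check (editorial): (2 / (1 + e**(-2x))) - 1 == tanh(x), so the result
# should agree elementwise with NumPy's reference implementation.
assert np.allclose(
    SCREAMING_SNAKE_CASE__(np.array([-3.0, 0.0, 1.0, 5.0])), np.tanh(np.array([-3.0, 0.0, 1.0, 5.0]))
)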
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Any = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path ,pytorch_dump_folder_path ) -> None:
snake_case : Dict = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1E-5,
"""token_type_vocab_size""": 2,
}
snake_case : Any = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
snake_case : Optional[Any] = BERTEncoder(
attention_cell=predefined_args["""attention_cell"""] ,num_layers=predefined_args["""num_layers"""] ,units=predefined_args["""units"""] ,hidden_size=predefined_args["""hidden_size"""] ,max_length=predefined_args["""max_length"""] ,num_heads=predefined_args["""num_heads"""] ,scaled=predefined_args["""scaled"""] ,dropout=predefined_args["""dropout"""] ,output_attention=lowercase__ ,output_all_encodings=lowercase__ ,use_residual=predefined_args["""use_residual"""] ,activation=predefined_args.get("""activation""" ,"""gelu""" ) ,layer_norm_eps=predefined_args.get("""layer_norm_eps""" ,lowercase__ ) ,)
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
snake_case : List[Any] = """openwebtext_ccnews_stories_books_cased"""
# Specify download folder to Gluonnlp's vocab
snake_case : Optional[int] = os.path.join(get_home_dir() ,"""models""" )
snake_case : int = _load_vocab(lowercase__ ,lowercase__ ,lowercase__ ,cls=lowercase__ )
    original_bort = nlp.model.BERTModel(
lowercase__ ,len(lowercase__ ) ,units=predefined_args["""units"""] ,embed_size=predefined_args["""embed_size"""] ,embed_dropout=predefined_args["""embed_dropout"""] ,word_embed=predefined_args["""word_embed"""] ,use_pooler=lowercase__ ,use_token_type_embed=lowercase__ ,token_type_vocab_size=predefined_args["""token_type_vocab_size"""] ,use_classifier=lowercase__ ,use_decoder=lowercase__ ,)
original_bort.load_parameters(lowercase__ ,cast_dtype=lowercase__ ,ignore_extra=lowercase__ )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
snake_case : List[str] = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowercase__ ),
}
snake_case : List[Any] = BertConfig.from_dict(lowercase__ )
snake_case : Tuple = BertForMaskedLM(lowercase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param ,gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
        return gluon_param
snake_case : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight ,"""word_embed.0.weight""" )
snake_case : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight ,"""encoder.position_weight""" )
snake_case : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias ,"""encoder.layer_norm.beta""" )
snake_case : List[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight ,"""encoder.layer_norm.gamma""" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
snake_case : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
snake_case : Any = hf_bort_model.bert.encoder.layer[i]
# self attention
snake_case : int = layer.attention.self
snake_case : Tuple = check_and_map_params(
self_attn.key.bias.data ,f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
snake_case : List[str] = check_and_map_params(
self_attn.key.weight.data ,f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
snake_case : Any = check_and_map_params(
self_attn.query.bias.data ,f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
snake_case : Optional[Any] = check_and_map_params(
self_attn.query.weight.data ,f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
snake_case : int = check_and_map_params(
self_attn.value.bias.data ,f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
snake_case : Tuple = check_and_map_params(
self_attn.value.weight.data ,f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
snake_case : int = layer.attention.output
snake_case : Tuple = check_and_map_params(
self_output.dense.bias ,f"""encoder.transformer_cells.{i}.proj.bias""" )
snake_case : List[str] = check_and_map_params(
self_output.dense.weight ,f"""encoder.transformer_cells.{i}.proj.weight""" )
snake_case : str = check_and_map_params(
self_output.LayerNorm.bias ,f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
snake_case : int = check_and_map_params(
self_output.LayerNorm.weight ,f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
snake_case : Optional[int] = layer.intermediate
snake_case : Optional[Any] = check_and_map_params(
intermediate.dense.bias ,f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
snake_case : Dict = check_and_map_params(
intermediate.dense.weight ,f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
snake_case : int = layer.output
snake_case : Optional[int] = check_and_map_params(
bert_output.dense.bias ,f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
snake_case : Any = check_and_map_params(
bert_output.dense.weight ,f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
snake_case : Optional[Any] = check_and_map_params(
bert_output.LayerNorm.bias ,f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
snake_case : str = check_and_map_params(
bert_output.LayerNorm.weight ,f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
snake_case : int = RobertaTokenizer.from_pretrained("""roberta-base""" )
snake_case : Optional[Any] = tokenizer.encode_plus(lowercase__ )["""input_ids"""]
# Get gluon output
snake_case : str = mx.nd.array([input_ids] )
snake_case : Dict = original_bort(inputs=lowercase__ ,token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowercase__ )
snake_case : Any = BertModel.from_pretrained(lowercase__ )
hf_bort_model.eval()
snake_case : str = tokenizer.encode_plus(lowercase__ ,return_tensors="""pt""" )
snake_case : str = hf_bort_model(**lowercase__ )[0]
snake_case : Tuple = output_gluon[0].asnumpy()
snake_case : List[Any] = output_hf[0].detach().numpy()
snake_case : Any = np.max(np.abs(hf_layer - gluon_layer ) ).item()
snake_case : Any = np.allclose(lowercase__ ,lowercase__ ,atol=1E-3 )
if success:
print("""✔️ Both model do output the same tensors""" )
else:
print("""❌ Both model do **NOT** output the same tensors""" )
print("""Absolute difference is:""" ,lowercase__ )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/led-base-16384': 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
snake_case : Dict = (
list(range(ord("""!""" ) ,ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) ,ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) ,ord("""ÿ""" ) + 1 ) )
)
snake_case : int = bs[:]
snake_case : Optional[Any] = 0
for b in range(2**8 ):
if b not in bs:
bs.append(lowerCAmelCase_ )
cs.append(2**8 + n )
n += 1
snake_case : int = [chr(lowerCAmelCase_ ) for n in cs]
return dict(zip(lowerCAmelCase_ ,lowerCAmelCase_ ) )
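# Editorial note: the three ranges above cover the 188 printable byte values,
# which map to themselves; the remaining 68 bytes map to chr(256 + n), so every
# byte becomes a visible character and BPE never operates on raw whitespace or
# control bytes.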
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
snake_case : Dict = set()
snake_case : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case : Tuple = char
return pairs
class __lowercase (a__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , **A , ) -> List[str]:
snake_case : Tuple = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else bos_token
snake_case : Dict = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else eos_token
snake_case : Union[str, Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else sep_token
snake_case : Optional[int] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else cls_token
snake_case : Tuple = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else unk_token
snake_case : Any = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case : str = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
super().__init__(
errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , )
with open(lowercase__ , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(lowercase__ )
snake_case : Any = {v: k for k, v in self.encoder.items()}
snake_case : Dict = errors # how to handle errors in decoding
snake_case : Optional[int] = bytes_to_unicode()
snake_case : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(lowercase__ , encoding="""utf-8""" ) as merges_handle:
snake_case : Tuple = merges_handle.read().split("""\n""" )[1:-1]
snake_case : Tuple = [tuple(merge.split() ) for merge in bpe_merges]
snake_case : int = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
snake_case : List[Any] = {}
snake_case : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case : Optional[int] = re.compile(r"""\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def UpperCAmelCase ( self ) -> Optional[int]:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Any:
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCAmelCase ( self , A ) -> List[Any]:
if token in self.cache:
return self.cache[token]
snake_case : Optional[int] = tuple(lowercase__ )
snake_case : List[str] = get_pairs(lowercase__ )
if not pairs:
return token
while True:
snake_case : Dict = min(lowercase__ , key=lambda A : self.bpe_ranks.get(lowercase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case : Any = bigram
snake_case : Union[str, Any] = []
snake_case : Tuple = 0
while i < len(lowercase__ ):
try:
snake_case : Union[str, Any] = word.index(lowercase__ , lowercase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case : Union[str, Any] = j
if word[i] == first and i < len(lowercase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case : str = tuple(lowercase__ )
snake_case : Any = new_word
if len(lowercase__ ) == 1:
break
else:
snake_case : str = get_pairs(lowercase__ )
snake_case : Union[str, Any] = ''' '''.join(lowercase__ )
snake_case : Optional[int] = word
return word
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : List[str] = []
for token in re.findall(self.pat , lowercase__ ):
snake_case : Optional[int] = ''''''.join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase__ ).split(""" """ ) )
return bpe_tokens
def UpperCAmelCase ( self , A ) -> List[str]:
return self.encoder.get(lowercase__ , self.encoder.get(self.unk_token ) )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.decoder.get(lowercase__ )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
snake_case : Any = ''''''.join(lowercase__ )
snake_case : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(lowercase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Tuple = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case : List[str] = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase__ , ensure_ascii=lowercase__ ) + """\n""" )
snake_case : Dict = 0
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
snake_case : Optional[Any] = token_index
writer.write(""" """.join(lowercase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCAmelCase ( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case : str = [self.cls_token_id]
snake_case : Tuple = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase__ )) + [1]
return [1] + ([0] * len(lowercase__ )) + [1, 1] + ([0] * len(lowercase__ )) + [1]
def UpperCAmelCase ( self , A , A = None ) -> List[int]:
snake_case : Optional[Any] = [self.sep_token_id]
snake_case : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase ( self , A , A=False , **A ) -> List[Any]:
snake_case : Union[str, Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowercase__ ) > 0 and not text[0].isspace()):
snake_case : Dict = ''' ''' + text
return (text, kwargs)
def UpperCAmelCase ( self , A , A = None , A = PaddingStrategy.DO_NOT_PAD , A = None , A = None , ) -> dict:
snake_case : int = super()._pad(
encoded_inputs=lowercase__ , max_length=lowercase__ , padding_strategy=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , )
# Load from model defaults
if return_attention_mask is None:
snake_case : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case : Optional[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case : Dict = len(encoded_inputs["""global_attention_mask"""] ) != len(lowercase__ )
if needs_to_be_padded:
snake_case : Any = len(lowercase__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case : Any = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
snake_case : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
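# Editorial demo of the -1 padding rule above: in LED's global_attention_mask,
# 0 means local attention, 1 means global attention, and -1 marks padding.
mask = [1, 0, 0]
difference = 2
assert mask + [-1] * difference == [1, 0, 0, -1, -1]  # padding_side == "right"
assert [-1] * difference + mask == [-1, -1, 1, 0, 0]  # padding_side == "left"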
| 703 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( word ,max_width ) -> list:
    words = word.split()

    def justify(line ,width ,max_width ) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line )
        if len(line ) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations ):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words ):
                # add the word
                aligned_words_list.append(line[i] )
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
            # just add the last word to the sentence
            aligned_words_list.append(line[-1] )
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list )

    answer = []
    line = []
    width = 0
    for word in words:
        if width + len(word ) + len(line ) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word )
            width += len(word )
        else:
            # justify the line and add it to result
            answer.append(justify(line ,width ,max_width ) )
            # reset new line and new width
            line, width = [word], len(word )
    remaining_spaces = max_width - width - len(line )
    answer.append(""" """.join(line ) + (remaining_spaces + 1) * """ """ )
    return answer
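# Editorial check with the classic example (using the reconstruction above):
assert SCREAMING_SNAKE_CASE__("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]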
if __name__ == "__main__":
from doctest import testmod
testmod()
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 684 | 0 |
def SCREAMING_SNAKE_CASE__ ( density ,bulk_modulus ) -> float:
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
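# Worked example (editorial): water has bulk_modulus of roughly 2.15e9 Pa and
# density of roughly 1000 kg/m^3, giving sqrt(2.15e9 / 1000), about 1466 m/s:
# SCREAMING_SNAKE_CASE__(1000, 2.15e9) -> ~1466.3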
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
| 684 | 0 |
def solution( n = 1000 ) -> int:
    return sum(e for e in range(3 ,n ) if e % 3 == 0 or e % 5 == 0 )
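# e.g. the multiples of 3 or 5 below 10 are 3, 5, 6 and 9, so:
assert solution(10) == 23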
if __name__ == "__main__":
print(f"""{solution() = }""")
| 706 |
def SCREAMING_SNAKE_CASE__ ( hex_num ) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num ,16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
from collections import defaultdict
class AssignmentUsingBitmask:
    """simple docstring"""

    def __init__( self , task_performed , total ) -> None:
        self.total_tasks = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(task_performed ) )
        ]
        self.task = defaultdict(list )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed )) - 1

    def count_ways_until( self , mask , task_no ) -> int:
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't use this task in the arrangement
        total_ways_util = self.count_ways_until(mask , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        self.dp[mask][task_no] = total_ways_util
        return self.dp[mask][task_no]

    def count_no_of_ways( self , task_performed ) -> int:
        # Store the list of persons for each task
        for i in range(len(task_performed ) ):
            for j in task_performed[i]:
                self.task[j].append(i )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )
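# Editorial note: `mask` carries one bit per person, so the memo table is
# (2**M) x (N + 1); dp[mask][task_no] caches the number of ways to hand out
# tasks task_no..N when the people in `mask` are already busy. For M = 5
# persons and N = 5 tasks that is only 32 x 6 states.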
if __name__ == "__main__":
total_tasks = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass  # RoCBert only ships a slow tokenizer, so there is nothing extra to import here
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 708 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
        snake_case : List[str] = inspect.getmembers(diffusers , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCamelCase : Optional[int] = False
lowerCamelCase : Optional[Any] = True
lowerCamelCase : List[str] = False
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--repo_path',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
lowerCamelCase : str = parser.parse_args()
lowerCamelCase : Dict = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
lowerCamelCase : List[str] = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
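    # The first mapping renames legacy config keys to their current diffusers names; the second
    # renames the leading component of checkpoint parameter names (e.g. "mid" -> "mid_block")
    # when the state dict is rewritten below.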
lowerCamelCase : Optional[int] = """""" if has_file(args.repo_path, 'config.json') else """unet"""
with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
lowerCamelCase : List[str] = reader.read()
lowerCamelCase : Union[str, Any] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, 'config.json'):
lowerCamelCase : Tuple = UNetaDModel(**config)
else:
lowerCamelCase : Union[str, Any] = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
lowerCamelCase : Any = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCamelCase : Dict = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCamelCase : List[str] = config[key]
del config[key]
lowerCamelCase : Optional[int] = [k.replace('UNetRes', '') for k in config["""down_block_types"""]]
lowerCamelCase : Any = [k.replace('UNetRes', '') for k in config["""up_block_types"""]]
if do_only_weights:
lowerCamelCase : Optional[int] = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
lowerCamelCase : Any = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
continue
lowerCamelCase : List[str] = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('.')[0] == key:
lowerCamelCase : int = param_value
lowerCamelCase : Dict = True
if not has_changed:
lowerCamelCase : List[Any] = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
        self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
        class DummyModel(BertForSequenceClassification ):
            """simple docstring"""
            pass
        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )
        class DummyModel(TFBertForSequenceClassification ):
            """simple docstring"""
            pass
        self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
        class DummyModel(FlaxBertForSequenceClassification ):
            """simple docstring"""
            pass
        self.assertEqual(find_labels(DummyModel ) , [] )
| 684 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
snake_case : Tuple = tempfile.mkdtemp()
snake_case : List[str] = BlipImageProcessor()
snake_case : List[Any] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
snake_case : List[str] = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
snake_case : Any = InstructBlipProcessor(_a , _a , _a )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **A ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).tokenizer
def UpperCAmelCase ( self , **A ) -> Union[str, Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).image_processor
def UpperCAmelCase ( self , **A ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **_a ).qformer_tokenizer
def UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Optional[Any]:
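        # Build a single random channels-first uint8 image and convert it to PIL for the processor tests.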
snake_case : int = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        snake_case : Optional[int] = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Union[str, Any] = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
snake_case : List[Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case : str = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
snake_case : Dict = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
self.assertIsInstance(processor.qformer_tokenizer , _a )
def UpperCAmelCase ( self ) -> Any:
snake_case : Tuple = self.get_image_processor()
snake_case : List[str] = self.get_tokenizer()
snake_case : Tuple = self.get_qformer_tokenizer()
snake_case : Optional[int] = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
snake_case : Union[str, Any] = self.prepare_image_inputs()
snake_case : str = image_processor(_a , return_tensors="""np""" )
snake_case : Optional[int] = processor(images=_a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self ) -> Any:
snake_case : Optional[int] = self.get_image_processor()
snake_case : Tuple = self.get_tokenizer()
snake_case : Any = self.get_qformer_tokenizer()
snake_case : str = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
snake_case : Union[str, Any] = """lower newer"""
snake_case : Dict = processor(text=_a )
snake_case : Optional[int] = tokenizer(_a , return_token_type_ids=_a )
snake_case : int = qformer_tokenizer(_a , return_token_type_ids=_a )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Dict = self.get_image_processor()
snake_case : str = self.get_tokenizer()
snake_case : List[Any] = self.get_qformer_tokenizer()
snake_case : str = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
snake_case : Any = """lower newer"""
snake_case : Union[str, Any] = self.prepare_image_inputs()
snake_case : List[Any] = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def UpperCAmelCase ( self ) -> str:
snake_case : str = self.get_image_processor()
snake_case : List[Any] = self.get_tokenizer()
snake_case : List[Any] = self.get_qformer_tokenizer()
snake_case : Dict = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
snake_case : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case : List[Any] = processor.batch_decode(_a )
snake_case : List[str] = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Tuple = self.get_image_processor()
snake_case : Union[str, Any] = self.get_tokenizer()
snake_case : int = self.get_qformer_tokenizer()
snake_case : Dict = InstructBlipProcessor(
tokenizer=_a , image_processor=_a , qformer_tokenizer=_a )
snake_case : Union[str, Any] = """lower newer"""
snake_case : Any = self.prepare_image_inputs()
snake_case : List[Any] = processor(text=_a , images=_a )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
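        # The positional parameters appear to map, in order, onto the attributes set below:
        # hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
        # hidden_dropout_prob, attention_probs_dropout_prob, initializer_range, layer_norm_eps,
        # patch_size, qkv_bias, frequency_stride, time_stride, max_length, num_mel_bins.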
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
| 684 | 0 |
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
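    # A tiny linear model plus optimizer, scheduler and dataloaders shared by the Accelerator tests below.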
snake_case : Optional[int] = torch.nn.Linear(2 ,4 )
snake_case : int = torch.optim.AdamW(model.parameters() ,lr=1.0 )
snake_case : Any = torch.optim.lr_scheduler.OneCycleLR(__UpperCamelCase ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 )
snake_case : Optional[Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
snake_case : Union[str, Any] = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Dict:
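    # A scalar fingerprint of the model's parameters, used below to detect whether weights changed.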
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
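    # Overwrite the model with freshly initialized weights of the same shape, so its signature
    # should no longer match a previously saved checkpoint.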
snake_case : Union[str, Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(__UpperCamelCase )
class __lowercase (SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@require_cuda
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Any = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(UpperCamelCase__ ):
snake_case : Dict = Accelerator(cpu=UpperCamelCase__ )
def UpperCAmelCase ( self ) -> str:
snake_case : Optional[int] = Accelerator()
snake_case : str = GradientState()
assert state.num_steps == 1
snake_case : Optional[Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
snake_case : int = False
assert state.sync_gradients is False
GradientState._reset_state()
def UpperCAmelCase ( self ) -> int:
        accelerator = Accelerator()
        model , optimizer , scheduler , train_dl , valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def UpperCAmelCase ( self ) -> int:
snake_case : Dict = Accelerator()
snake_case : str = create_components()
accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def UpperCAmelCase ( self ) -> Optional[Any]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*A , **A ):
pass
with patch("""torch.cuda.set_device""" , UpperCamelCase__ ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
snake_case : Optional[int] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def UpperCAmelCase ( self ) -> str:
snake_case : Tuple = Accelerator()
snake_case : Any = create_components()
accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[Any] = get_signature(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCamelCase__ )
# make sure random weights don't match
load_random_weights(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 )
def UpperCAmelCase ( self ) -> Any:
snake_case : List[Any] = Accelerator()
snake_case : Optional[int] = create_components()
accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
snake_case : Union[str, Any] = get_signature(UpperCamelCase__ )
# saving hook
def save_config(A , A , A ):
snake_case : Tuple = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(UpperCamelCase__ , """data.json""" ) , """w""" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
# loading hook
def load_config(A , A ):
with open(os.path.join(UpperCamelCase__ , """data.json""" ) , """r""" ) as f:
snake_case : Optional[int] = json.load(UpperCamelCase__ )
snake_case : List[Any] = config['''class_name''']
snake_case : Dict = accelerator.register_save_state_pre_hook(UpperCamelCase__ )
snake_case : Optional[int] = accelerator.register_load_state_pre_hook(UpperCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCamelCase__ )
# make sure random weights don't match with hooks
load_random_weights(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCamelCase__ )
# make sure random weights don't match with hooks removed
load_random_weights(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(UpperCamelCase__ )
self.assertTrue(abs(model_signature - get_signature(UpperCamelCase__ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : int = Accelerator()
snake_case : Tuple = create_components()
snake_case : List[Any] = None
# This should work
snake_case : Tuple = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.assertTrue(dummy_obj is None )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : str = Accelerator()
snake_case : int = create_components()
snake_case : Any = [1, 2, 3]
# This should work
snake_case : List[Any] = accelerator.prepare(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual(
getattr(UpperCamelCase__ , """_is_accelerate_prepared""" , UpperCamelCase__ ) , UpperCamelCase__ , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , )
self.assertEqual(
getattr(UpperCamelCase__ , """_is_accelerate_prepared""" , UpperCamelCase__ ) , UpperCamelCase__ , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(UpperCamelCase__ , """_is_accelerate_prepared""" , UpperCamelCase__ ) , UpperCamelCase__ , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(UpperCamelCase__ , """_is_accelerate_prepared""" , UpperCamelCase__ ) , UpperCamelCase__ , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(UpperCamelCase__ , """_is_accelerate_prepared""" , UpperCamelCase__ ) , UpperCamelCase__ , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
self.assertEqual(
getattr(UpperCamelCase__ , """_is_accelerate_prepared""" , UpperCamelCase__ ) , UpperCamelCase__ , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , )
@slow
@require_bnb
def UpperCAmelCase ( self ) -> Dict:
from transformers import AutoModelForCausalLM
snake_case : Tuple = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=UpperCamelCase__ , device_map={"""""": 0} , )
snake_case : Optional[int] = Accelerator()
# This should work
snake_case : Optional[Any] = accelerator.prepare(UpperCamelCase__ )
@slow
@require_bnb
def UpperCAmelCase ( self ) -> int:
from transformers import AutoModelForCausalLM
snake_case : List[str] = Accelerator()
with init_empty_weights():
snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
snake_case : Any = infer_auto_device_map(UpperCamelCase__ )
snake_case : Any = '''cpu'''
snake_case : Dict = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , device_map=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , llm_inta_enable_fpaa_cpu_offload=UpperCamelCase__ )
# This should not work and get value error
with self.assertRaises(UpperCamelCase__ ):
snake_case : Union[str, Any] = accelerator.prepare(UpperCamelCase__ )
@slow
@require_bnb
@require_multi_gpu
def UpperCAmelCase ( self ) -> Any:
from transformers import AutoModelForCausalLM
snake_case : Any = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
snake_case : Tuple = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
model.tie_weights()
snake_case : Dict = infer_auto_device_map(UpperCamelCase__ )
snake_case : Dict = 1
snake_case : Dict = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=UpperCamelCase__ , device_map=UpperCamelCase__ , )
snake_case : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(UpperCamelCase__ ):
snake_case : Dict = accelerator.prepare(UpperCamelCase__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def UpperCAmelCase ( self ) -> Optional[Any]:
from transformers import AutoModelForCausalLM
with init_empty_weights():
snake_case : str = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , )
snake_case : Tuple = infer_auto_device_map(UpperCamelCase__ )
snake_case : Optional[int] = 1
snake_case : str = AutoModelForCausalLM.from_pretrained(
"""EleutherAI/gpt-neo-125m""" , load_in_abit=UpperCamelCase__ , device_map=UpperCamelCase__ , )
snake_case : List[Any] = Accelerator()
# This should work
snake_case : List[Any] = accelerator.prepare(UpperCamelCase__ )
@require_cuda
def UpperCAmelCase ( self ) -> str:
snake_case : int = torch.nn.Linear(1_0 , 1_0 )
snake_case : int = torch.optim.SGD(model.parameters() , lr=0.01 )
snake_case : str = Accelerator(cpu=UpperCamelCase__ )
snake_case : Optional[Any] = accelerator.prepare(UpperCamelCase__ )
| 711 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
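        # Split the incoming keyword arguments into the three dicts consumed by `preprocess`,
        # `_forward` and `postprocess` respectively.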
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
            if task and len(items ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
| 684 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
lowerCamelCase : Tuple = logging.get_logger(__name__)
# General docstring
lowerCamelCase : Tuple = 'MobileNetV1Config'
# Base docstring
lowerCamelCase : Optional[int] = 'google/mobilenet_v1_1.0_224'
lowerCamelCase : Optional[int] = [1, 1_0_2_4, 7, 7]
# Image classification docstring
lowerCamelCase : int = 'google/mobilenet_v1_1.0_224'
lowerCamelCase : Union[str, Any] = 'tabby, tabby cat'
lowerCamelCase : str = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase=None ) -> List[Any]:
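    # Walk the MobileNetV1 backbone and record, for each TF checkpoint variable prefix,
    # the PyTorch parameter it should be copied into.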
snake_case : Optional[Any] = {}
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
snake_case : Dict = model.mobilenet_va
else:
snake_case : Union[str, Any] = model
snake_case : List[str] = """MobilenetV1/Conv2d_0/"""
snake_case : int = backbone.conv_stem.convolution.weight
snake_case : Optional[Any] = backbone.conv_stem.normalization.bias
snake_case : Union[str, Any] = backbone.conv_stem.normalization.weight
snake_case : Optional[Any] = backbone.conv_stem.normalization.running_mean
snake_case : Any = backbone.conv_stem.normalization.running_var
for i in range(13 ):
snake_case : Any = i + 1
snake_case : List[str] = i * 2
snake_case : Dict = backbone.layer[pt_index]
snake_case : Any = f"""MobilenetV1/Conv2d_{tf_index}_depthwise/"""
snake_case : str = pointer.convolution.weight
snake_case : Any = pointer.normalization.bias
snake_case : Optional[int] = pointer.normalization.weight
snake_case : int = pointer.normalization.running_mean
snake_case : Union[str, Any] = pointer.normalization.running_var
snake_case : Optional[Any] = backbone.layer[pt_index + 1]
snake_case : List[Any] = f"""MobilenetV1/Conv2d_{tf_index}_pointwise/"""
snake_case : int = pointer.convolution.weight
snake_case : Tuple = pointer.normalization.bias
snake_case : Optional[int] = pointer.normalization.weight
snake_case : int = pointer.normalization.running_mean
snake_case : int = pointer.normalization.running_var
if isinstance(lowerCamelCase_ ,lowerCamelCase_ ):
snake_case : Tuple = """MobilenetV1/Logits/Conv2d_1c_1x1/"""
snake_case : Dict = model.classifier.weight
snake_case : List[Any] = model.classifier.bias
return tf_to_pt_map
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Optional[Any]:
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"""Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
"""https://www.tensorflow.org/install/ for installation instructions.""" )
raise
# Load weights from TF model
snake_case : Union[str, Any] = tf.train.list_variables(lowerCamelCase_ )
snake_case : Optional[Any] = {}
for name, shape in init_vars:
logger.info(f"""Loading TF weight {name} with shape {shape}""" )
snake_case : Dict = tf.train.load_variable(lowerCamelCase_ ,lowerCamelCase_ )
snake_case : Optional[int] = array
# Build TF to PyTorch weights loading map
snake_case : Any = _build_tf_to_pytorch_map(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(f"""Importing {name}""" )
if name not in tf_weights:
logger.info(f"""{name} not in tf pre-trained weights, skipping""" )
continue
snake_case : Optional[Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info("""Transposing depthwise""" )
snake_case : int = np.transpose(lowerCamelCase_ ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info("""Transposing""" )
if len(pointer.shape ) == 2: # copying into linear layer
snake_case : Optional[Any] = array.squeeze().transpose()
else:
snake_case : List[str] = np.transpose(lowerCamelCase_ ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"""Pointer shape {pointer.shape} and array shape {array.shape} mismatched""" )
logger.info(f"""Initialize PyTorch weight {name} {array.shape}""" )
snake_case : List[str] = torch.from_numpy(lowerCamelCase_ )
tf_weights.pop(lowerCamelCase_ ,lowerCamelCase_ )
tf_weights.pop(name + """/RMSProp""" ,lowerCamelCase_ )
tf_weights.pop(name + """/RMSProp_1""" ,lowerCamelCase_ )
tf_weights.pop(name + """/ExponentialMovingAverage""" ,lowerCamelCase_ )
logger.info(f"""Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}""" )
return model
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Union[str, Any]:
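    # Reproduce TensorFlow "SAME" padding: pad each spatial side just enough for the convolution
    # to cover the input exactly, given its stride and kernel size.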
snake_case , snake_case : Dict = features.shape[-2:]
snake_case , snake_case : int = conv_layer.stride
snake_case , snake_case : int = conv_layer.kernel_size
if in_height % stride_height == 0:
snake_case : Optional[Any] = max(kernel_height - stride_height ,0 )
else:
snake_case : Optional[int] = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
snake_case : Dict = max(kernel_width - stride_width ,0 )
else:
snake_case : Any = max(kernel_width - (in_width % stride_width) ,0 )
snake_case : Optional[int] = pad_along_width // 2
snake_case : Optional[int] = pad_along_width - pad_left
snake_case : Optional[int] = pad_along_height // 2
snake_case : Dict = pad_along_height - pad_top
snake_case : Dict = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(lowerCamelCase_ ,lowerCamelCase_ ,"""constant""" ,0.0 )
class __lowercase (nn.Module ):
"""simple docstring"""
def __init__( self , A , A , A , A , A = 1 , A = 1 , A = False , A = True , A = True , ) -> None:
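        # Positional parameters: config, in_channels, out_channels, kernel_size, stride, groups,
        # bias, use_normalization, use_activation -- the body below refers to them by these names.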
super().__init__()
snake_case : Any = config
if in_channels % groups != 0:
raise ValueError(f"""Input channels ({in_channels}) are not divisible by {groups} groups.""" )
if out_channels % groups != 0:
raise ValueError(f"""Output channels ({out_channels}) are not divisible by {groups} groups.""" )
snake_case : Tuple = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
snake_case : int = nn.Convad(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=_lowerCamelCase , stride=_lowerCamelCase , padding=_lowerCamelCase , groups=_lowerCamelCase , bias=_lowerCamelCase , padding_mode="""zeros""" , )
if use_normalization:
snake_case : Dict = nn.BatchNormad(
num_features=_lowerCamelCase , eps=config.layer_norm_eps , momentum=0.99_97 , affine=_lowerCamelCase , track_running_stats=_lowerCamelCase , )
else:
snake_case : Dict = None
if use_activation:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
snake_case : Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act , _lowerCamelCase ):
snake_case : List[str] = ACTaFN[config.hidden_act]
else:
snake_case : int = config.hidden_act
else:
snake_case : Optional[Any] = None
def UpperCAmelCase ( self , A ) -> torch.Tensor:
if self.config.tf_padding:
snake_case : List[str] = apply_tf_padding(_lowerCamelCase , self.convolution )
snake_case : int = self.convolution(_lowerCamelCase )
if self.normalization is not None:
snake_case : List[str] = self.normalization(_lowerCamelCase )
if self.activation is not None:
snake_case : Optional[int] = self.activation(_lowerCamelCase )
return features
class __lowercase (lowerCAmelCase__ ):
"""simple docstring"""
_snake_case = MobileNetVaConfig
_snake_case = load_tf_weights_in_mobilenet_va
_snake_case = "mobilenet_v1"
_snake_case = "pixel_values"
_snake_case = False
def UpperCAmelCase ( self , A ) -> None:
if isinstance(_lowerCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_lowerCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
lowerCamelCase : Optional[int] = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase : Tuple = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , lowerCAmelCase__ , )
class __lowercase (lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , A , A = True ) -> str:
super().__init__(_lowerCamelCase )
snake_case : List[str] = config
snake_case : Any = 3_2
snake_case : str = max(int(depth * config.depth_multiplier ) , config.min_depth )
snake_case : List[str] = MobileNetVaConvLayer(
_lowerCamelCase , in_channels=config.num_channels , out_channels=_lowerCamelCase , kernel_size=3 , stride=2 , )
snake_case : List[str] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
snake_case : str = nn.ModuleList()
for i in range(1_3 ):
snake_case : Union[str, Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
snake_case : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=3 , stride=strides[i] , groups=_lowerCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=1 , ) )
snake_case : int = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCAmelCase ( self , A ) -> Tuple:
raise NotImplementedError
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase ( self , A = None , A = None , A = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
snake_case : List[str] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
snake_case : str = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
snake_case : Any = self.conv_stem(_lowerCamelCase )
snake_case : List[Any] = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
snake_case : Dict = layer_module(_lowerCamelCase )
if output_hidden_states:
snake_case : Dict = all_hidden_states + (hidden_states,)
snake_case : Optional[int] = hidden_states
if self.pooler is not None:
snake_case : Dict = torch.flatten(self.pooler(_lowerCamelCase ) , start_dim=1 )
else:
snake_case : Dict = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCamelCase , pooler_output=_lowerCamelCase , hidden_states=_lowerCamelCase , )
@add_start_docstrings(
"""\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , lowerCAmelCase__ , )
class __lowercase (lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , A ) -> None:
super().__init__(_lowerCamelCase )
snake_case : List[str] = config.num_labels
snake_case : Tuple = MobileNetVaModel(_lowerCamelCase )
snake_case : Dict = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
snake_case : int = nn.Dropout(config.classifier_dropout_prob , inplace=_lowerCamelCase )
snake_case : List[str] = nn.Linear(_lowerCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase ( self , A = None , A = None , A = None , A = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
snake_case : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
snake_case : str = self.mobilenet_va(_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase )
snake_case : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
snake_case : Any = self.classifier(self.dropout(_lowerCamelCase ) )
snake_case : Dict = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
snake_case : Dict = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
snake_case : Any = """single_label_classification"""
else:
snake_case : List[Any] = """multi_label_classification"""
if self.config.problem_type == "regression":
snake_case : Dict = MSELoss()
if self.num_labels == 1:
snake_case : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
snake_case : List[str] = loss_fct(_lowerCamelCase , _lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
snake_case : Optional[int] = CrossEntropyLoss()
snake_case : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
snake_case : Tuple = BCEWithLogitsLoss()
snake_case : Union[str, Any] = loss_fct(_lowerCamelCase , _lowerCamelCase )
if not return_dict:
snake_case : str = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_lowerCamelCase , logits=_lowerCamelCase , hidden_states=outputs.hidden_states , )
| 712 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
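    # Strip comments and blank lines, then hash what remains; the digest keys the
    # per-module cache entries built right below.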
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 0 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
class __lowercase (_A ):
"""simple docstring"""
def __init__( self , A ) -> List[Any]:
super().__init__()
snake_case : Union[str, Any] = nn.ModuleList(A )
def UpperCAmelCase ( self , A , A , A , A , A , A = None , A = None , A = None , A = None , A = False , A = True , ) -> Union[ControlNetOutput, Tuple]:
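        # Run every ControlNet on its own conditioning image and scale, then sum the per-block residuals.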
for i, (image, scale, controlnet) in enumerate(zip(A , A , self.nets ) ):
snake_case : List[str] = controlnet(
A , A , A , A , A , A , A , A , A , A , A , )
# merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def UpperCAmelCase ( self , A , A = True , A = None , A = False , A = None , ) -> Union[str, Any]:
snake_case : List[str] = 0
snake_case : Optional[int] = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
A , is_main_process=A , save_function=A , safe_serialization=A , variant=A , )
idx += 1
snake_case : List[Any] = model_path_to_save + f"""_{idx}"""
@classmethod
def UpperCAmelCase ( cls , A , **A ) -> Dict:
snake_case : List[Any] = 0
snake_case : int = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
snake_case : Any = pretrained_model_path
while os.path.isdir(A ):
snake_case : List[str] = ControlNetModel.from_pretrained(A , **A )
controlnets.append(A )
idx += 1
snake_case : Optional[Any] = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(A )} controlnets loaded from {pretrained_model_path}.""" )
if len(A ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(A )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(A )
| 713 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase : List[str] = {
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = [
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 714 |
from ..utils import DummyObject, requires_backends
# NOTE: this module defines one dummy placeholder per Flax pipeline class; the
# distinct class names were lost upstream, so the thirteen definitions below all
# share the same placeholder name (each shadowing the previous one).
class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])


class __lowercase(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
| 684 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
lowerCamelCase : Tuple = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : Tuple = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase : List[str] = {
'unc-nlp/lxmert-base-uncased': 5_1_2,
}
lowerCamelCase : str = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" LXMERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 715 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3
def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
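# Note: the checks above are this script's lightweight heuristic, not a complete
# primitive-root test: pow(g, 2, p) == 1 only rules out elements of order 1 or 2,
# and by Fermat's little theorem pow(g, p, p) == g % p, so the second check only
# rejects g == 1.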
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 684 | 0 |
"""Functions for downloading and reading MNIST data (deprecated tf.contrib.learn-style helpers)."""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """Container for a data set, mirroring the deprecated tf.contrib.learn API."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` unless it is already there."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = "Validation size should be between 0 and " f"{len(train_images)}. Received: {validation_size}."
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
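# A minimal usage sketch (the path is illustrative; the gz files are downloaded on
# first use):
#
#   datasets = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = datasets.train.next_batch(100)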
| 716 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
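# _modexpt is plain square-and-multiply: an even exponent squares the half-result,
# an odd exponent peels off one factor of `base`, and every step reduces mod
# `modulo_value`, so intermediates stay small. Quick sanity check against the
# built-in three-argument pow:
#
#   assert _modexpt(3, 100, 101) == pow(3, 100, 101)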
def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    # Evaluate the power tower base↑↑height modulo 10**digits, from the top down.
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
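# Both frequency tables are exact distributions over all 4**9 and 6**6 equally
# likely rolls, so the loop computes P(Peter wins) = sum over t of
# P(Peter's total == t) * P(Colin's total < t) exactly, with no simulation.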
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048
        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
def UpperCAmelCase ( self ) -> List[str]:
# Initialize image_processor
snake_case : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case : str = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case : Any = image_processor(
__A , return_tensors="""pt""" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase ( self ) -> Tuple:
# Initialize image_processor
snake_case : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case : List[Any] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
snake_case : Dict = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__A ):
snake_case : List[str] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__A ).flattened_patches
snake_case : Dict = """Hello"""
snake_case : Optional[Any] = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__A , header_text=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case : Tuple = image_processor(
__A , return_tensors="""pt""" , max_patches=__A , header_text=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase ( self ) -> Any:
# Initialize image_processor
snake_case : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
snake_case : Optional[int] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case : Tuple = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case : List[str] = image_processor(
__A , return_tensors="""pt""" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def UpperCAmelCase ( self ) -> str:
# Initialize image_processor
snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case : Optional[Any] = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case : Tuple = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case : int = image_processor(
__A , return_tensors="""pt""" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))
def UpperCAmelCase ( self ) -> str:
# Initialize image_processor
snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case : str = (
(self.image_processor_tester.patch_size["""height"""] * self.image_processor_tester.patch_size["""width"""])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
snake_case : int = image_processor(
image_inputs[0] , return_tensors="""pt""" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
snake_case : Tuple = image_processor(
__A , return_tensors="""pt""" , max_patches=__A ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) | 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
| 719 |
import os
def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 684 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run."""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
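# The GitHub API caps `per_page` at 100, which is why the helper reads one page,
# derives the remaining page count from `total_count`, and then walks `&page=2, 3, ...`.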
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
lowerCamelCase : List[str] = parser.parse_args()
lowerCamelCase : str = get_job_time(args.workflow_run_id)
lowerCamelCase : List[Any] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f"""{k}: {v["duration"]}""")
| 720 |
def cocktail_shaker_sort(unsorted: list) -> list:
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
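# Cocktail shaker sort is a bidirectional bubble sort: each outer pass bubbles the
# largest remaining element to the right and the smallest to the left, and the
# `swapped` flag gives an early exit on already-sorted input (O(n) best case,
# O(n^2) worst case).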
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : List[str] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase : Dict = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase : List[Any] = {
'distilbert-base-uncased': 5_1_2,
'distilbert-base-uncased-distilled-squad': 5_1_2,
'distilbert-base-cased': 5_1_2,
'distilbert-base-cased-distilled-squad': 5_1_2,
'distilbert-base-german-cased': 5_1_2,
'distilbert-base-multilingual-cased': 5_1_2,
}
lowerCamelCase : Any = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 721 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    r"""Constructs a Jukebox tokenizer, which handles three separate vocabularies: artists, genres and lyrics."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
| 684 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
snake_case : Any = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"""`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got """
f"""{test_file} instead.""" )
snake_case : Optional[int] = components[-1]
if not test_fn.endswith("""py""" ):
raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith("""test_modeling_""" ):
raise ValueError(
f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
snake_case : Any = components[:-1] + [test_fn.replace(""".py""" ,"""""" )]
snake_case : Dict = """.""".join(_lowerCamelCase )
return test_module_path
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
snake_case : Tuple = get_module_path(_lowerCamelCase )
snake_case : Tuple = importlib.import_module(_lowerCamelCase )
return test_module
def get_tester_classes(test_file):
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester
def get_test_classes_for_model(test_file, model_class):
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
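# Hedged usage sketch: the test-file path below is hypothetical; point it at a
# real test module to build the model <-> test/tester maps.
#
#   mapping = get_model_to_test_mapping("tests/models/bert/test_modeling_bert.py")
#   print(to_json(mapping))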
| 700 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
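    # The naive scan runs in O(len(s) * len(pattern)) time and also reports
    # overlapping occurrences, e.g. (illustrative check):
    assert naive_pattern_search('AAAAA', 'AAA') == [0, 1, 2]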
| 684 | 0 |
def solution(limit: int = 28123) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
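    # Illustrative check: the known answer to Project Euler problem 23 with the
    # default limit of 28123 is 4179871.
    # assert solution() == 4179871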
| 701 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # tanh(x) written via the logistic closed form: (2 / (1 + e^(-2x))) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
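    # Quick equivalence check: (2 / (1 + exp(-2x))) - 1 is the classic closed
    # form of tanh, so it should match np.tanh elementwise.
    sample = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
    assert np.allclose(tangent_hyperbolic(sample), np.tanh(sample))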
| 684 | 0 |
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "roberta":
lowerCamelCase : int = RobertaForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase : List[Any] = 'roberta'
elif args.model_type == "gpt2":
lowerCamelCase : str = GPTaLMHeadModel.from_pretrained(args.model_name)
lowerCamelCase : Dict = 'transformer'
lowerCamelCase : List[Any] = model.state_dict()
lowerCamelCase : List[Any] = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
lowerCamelCase : Optional[int] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
lowerCamelCase : List[Any] = f"""{prefix}.embeddings.{w}.weight"""
lowerCamelCase : List[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
lowerCamelCase : List[Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
lowerCamelCase : Tuple = state_dict[param_name]
# Transformer Blocks #
lowerCamelCase : Optional[int] = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
lowerCamelCase : Any = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
lowerCamelCase : Union[str, Any] = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
lowerCamelCase : Tuple = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
lowerCamelCase : Union[str, Any] = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase : Union[str, Any] = state_dict[f"""lm_head.dense.{w}"""]
lowerCamelCase : Union[str, Any] = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
lowerCamelCase : Optional[int] = state_dict[f"""{prefix}.ln_f.{w}"""]
lowerCamelCase : Optional[int] = state_dict['lm_head.weight']
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
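    # Hedged follow-up sketch: the extracted weights are meant to initialize a
    # 6-layer student whose parameter names match the keys above; the
    # `student_config` name below is hypothetical.
    #
    #   student = RobertaForMaskedLM(student_config)
    #   student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)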
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_mae'] = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vit_mae'] = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 0 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n        except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n        except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]
    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)
    sim = cdist(en_sentvecs, in_sentvecs, """cosine""")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
"""simple docstring"""
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
"""references""": datasets.Value("""int64""" )
if self.config_name != """cvit-mkb-clsr"""
else datasets.Sequence(datasets.Value("""float32""" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if self.config_name != """cvit-mkb-clsr""" else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", """
"""\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", """
"""\"wiki-ner\"]""" )
| 703 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 0 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    """simple docstring"""
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs['second_text'] = kwargs['second_text']
        return preprocess_kwargs, {}, {}
    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
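# Hedged usage sketch (downloads the sentencepiece model of a real checkpoint):
#
#   tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_tts")
#   ids = tokenizer("Hello world", return_tensors="pt").input_ids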
| 684 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False)
parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not')
parser.add_argument('--steps', default=None, type=int, help='Num inference steps')
args = parser.parse_args()

device = 'cpu'
prompt = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'

model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'generator': generator}
if args.steps is not None:
    generate_kwargs['num_inference_steps'] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save('generated.png')
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 684 | 0 |
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("""This should never happen""")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = 'Enter the base and the power separated by a comma: '
    x1, y1 = map(int, input(prompt).split(','))
    x2, y2 = map(int, input(prompt).split(','))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print('Largest number is', x1, '^', y1)
    elif res2 > res1:
        print('Largest number is', x2, '^', y2)
    else:
        print('Both are equal')
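# Why compare logarithms: x^y itself can overflow for large inputs, while
# y * log10(x) stays small. For example res(2, 1000) == 1000 * log10(2)
# ~= 301.03, i.e. 2^1000 has about 302 decimal digits.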
| 706 |
def hex_to_bin(hex_num: str) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""")
    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""")
    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("""-""" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
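    # Illustrative checks: 0xAC == 172 == 0b10101100, and the sign is kept.
    assert hex_to_bin("AC") == 10101100
    assert hex_to_bin("-66") == -1100110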
| 684 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
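# Hedged usage sketch (downloads the pretrained sentencepiece model):
#
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   print(tokenizer.tokenize("Hello, world!"))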
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class __lowercase (BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample=PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample=PIL.Image.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray."""
            )

        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 684 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2,
'facebook/dpr-ctx_encoder-multiset-base': 5_1_2,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 5_1_2,
'facebook/dpr-question_encoder-multiset-base': 5_1_2,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 5_1_2,
'facebook/dpr-reader-multiset-base': 5_1_2,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)

DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])

CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """simple docstring"""

    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."""
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["""input_ids"""]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["""attention_mask"""] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: "DPRReaderOutput",
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["""input_ids"""]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans, ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
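# Hedged usage sketch: the checkpoint name is real; the reader outputs below
# are only placeholders for actual model outputs.
#
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded = tokenizer(questions="What is love?", titles="Haddaway", texts="'What Is Love' is a song...")
#   # Given `outputs = (start_logits, end_logits, relevance_logits)` from a DPRReader model:
#   # best = tokenizer.decode_best_spans(encoded, outputs)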
| 708 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    """simple docstring"""

    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = """k-diffusion"""
                    elif backend == "invisible_watermark":
                        backend = """invisible-watermark"""
                    assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = 'main'
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = 'aaaaaaa'
# This commit does not exist, so we should 404.

PINNED_SHA1 = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class TestImportMechanisms(unittest.TestCase):
    """simple docstring"""

    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("""transformers""") is not None


class GenericUtilTests(unittest.TestCase):
    """simple docstring"""

    @unittest.mock.patch("""sys.stdout""", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("""Transformers are awesome!""")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), """Transformers are awesome!\n""")

    @unittest.mock.patch("""sys.stdout""", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("""Transformers are awesome!""")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), """Welcome!\nTransformers are awesome!\nBye!\n""")

    @unittest.mock.patch("""sys.stdout""", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("""Transformers are awesome!""")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["""labels"""])
        self.assertEqual(find_labels(BertForPreTraining), ["""labels""", """next_sentence_label"""])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["""start_positions""", """end_positions"""])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["""labels"""])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["""labels"""])
        self.assertEqual(find_labels(TFBertForPreTraining), ["""labels""", """next_sentence_label"""])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["""start_positions""", """end_positions"""])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["""labels"""])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
| 684 | 0 |
from __future__ import annotations
def mean(nums: list) -> float:
    if not nums:
        raise ValueError("""List is empty""")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'MIT/ast-finetuned-audioset-10-10-0.4593': (
        'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
    ),
}


class ASTConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 684 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PREFIX = '''https://openaipublic.azureedge.net/jukebox/models/'''
MODEL_MAPPING = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case : List[Any] = key.replace(""".model.1.bias""" ,""".conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case : Union[str, Any] = key.replace(""".model.1.weight""" ,""".conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
snake_case : Union[str, Any] = key.replace(""".model.3.bias""" ,""".conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
snake_case : List[Any] = key.replace(""".model.3.weight""" ,""".conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
snake_case : Optional[Any] = key.replace("""conditioner_blocks.0""" ,"""conditioner_blocks""" )
if "prime_prior" in key:
snake_case : Optional[Any] = key.replace("""prime_prior""" ,"""encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
snake_case : Optional[Any] = key.replace(""".emb.""" ,""".""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" ,""".codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" ,"""metadata_embedding.""" )
if "x_emb.emb." in key:
snake_case : Optional[int] = key.replace("""0.x_emb.emb""" ,"""embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" ,"""encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" ,""".layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" ,"""_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" ,"""encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" ,"""encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" ,"""fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" ,"""embed_tokens""" )
return key
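# Illustrative key (not taken from a real checkpoint): only the trailing ".k"
# matches a rewrite rule here, so
# replace_key("vqvae.bottleneck.level_blocks.0.k")
# returns "vqvae.bottleneck.level_blocks.0.codebook".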
def fix_jukebox_keys(state_dict ,model_state_dict ,key_prefix ,mapping ):
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
    re_encoder_block_resnet = re.compile(
        R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    re_encoder_block_proj_out = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
    re_decoder_block_conv_out = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
    re_decoder_block_resnet = re.compile(
        R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    re_decoder_block_proj_in = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
    re_prior_cond_conv_out = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
    re_prior_cond_resnet = re.compile(
        R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
    re_prior_cond_proj_in = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
            key = re_encoder_block_conv_in.sub(re_new_key ,original_key )
        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key ,original_key )
        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
            key = re_encoder_block_proj_out.sub(re_new_key ,original_key )
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
            key = re_decoder_block_conv_out.sub(re_new_key ,original_key )
        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key ,original_key )
        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
            key = re_decoder_block_proj_in.sub(re_new_key ,original_key )
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
            key = re_prior_cond_conv_out.sub(re_new_key ,original_key )
        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
            resnet_block = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key ,original_key )
        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
            key = re_prior_cond_proj_in.sub(re_new_key ,original_key )
        # keep original key
        else:
            key = original_key
        key = replace_key(key )
        if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
            print(f"""failed converting {original_key} to {key}, does not match""" )
        # handle missmatched shape
        elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
            val = model_state_dict[f"""{key_prefix}.{key}"""]
            print(f"""{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match""" )
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None ,pytorch_dump_folder_path=None ):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ):
            r = requests.get(f"""{PREFIX}{file}""" ,allow_redirects=True )
            os.makedirs(f"""{pytorch_dump_folder_path}/""" ,exist_ok=True )
            open(f"""{pytorch_dump_folder_path}/{file.split("/" )[-1]}""" ,"""wb""" ).write(r.content )
    model_to_convert = MODEL_MAPPING[model_name.split("""/""" )[-1]]
    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}""" )['model']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(""".b""" ):
                new_dic[k.replace("""b""" ,"""bias""" )] = old_dic[k]
            elif k.endswith(""".w""" ):
                new_dic[k.replace("""w""" ,"""weight""" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(""".blocks.""" ,""".model.""" )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else f"""priors.{3 - i}"""
        new_dic = fix_jukebox_keys(new_dic ,model.state_dict() ,key_prefix ,mapping )
        weight_dict.append(new_dic )
    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(f"""{pytorch_dump_folder_path}/mapping.json""" ,"""w""" ) as txtfile:
        json.dump(mapping ,txtfile )
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    return weight_dict
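# Sketch of the intended invocation (the script name depends on where this file is
# saved; the flag values mirror the argparse defaults below):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted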
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
lowerCamelCase : Dict = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 711 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum ):
    """simple docstring"""
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    """simple docstring"""
    # Used in the return key of the pipeline.
    return_name = """generated"""
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self , return_tensors=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs , ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            postprocess_params["""return_type"""] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self , input_length: int , min_length: int , max_length: int ):
return True
    def _parse_and_tokenize( self , *args , truncation ):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else """"""
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. They should be either of type `str` or type `list`""" )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self , *args , **kwargs ):
        result = super().__call__(*args , **kwargs )
        if (
            isinstance(args[0] , list )
            and all(isinstance(el , str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self , inputs , truncation=TruncationStrategy.DO_NOT_TRUNCATE , **kwargs ):
        inputs = self._parse_and_tokenize(inputs , truncation=truncation , **kwargs )
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        if self.framework == "pt":
            in_b , input_length = model_inputs["""input_ids"""].shape
        elif self.framework == "tf":
            in_b , input_length = tf.shape(model_inputs["""input_ids"""] ).numpy()
        generate_kwargs["""min_length"""] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
        generate_kwargs["""max_length"""] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
        self.check_inputs(input_length , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess( self , model_outputs , return_type=ReturnType.TEXT , clean_up_tokenization_spaces=False ):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                }
            records.append(record )
        return records
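# Minimal usage sketch (the model choice is illustrative):
#   from transformers import pipeline
#   generator = pipeline("text2text-generation", model="t5-small")
#   generator("translate English to German: Good morning")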
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    """simple docstring"""
    # Used in the return key of the pipeline.
    return_name = """summary"""
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
    def check_inputs( self , input_length: int , min_length: int , max_length: int ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    """simple docstring"""
    # Used in the return key of the pipeline.
    return_name = """translation"""
    def check_inputs( self , input_length: int , min_length: int , max_length: int ):
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
    def preprocess( self , *args , truncation=TruncationStrategy.DO_NOT_TRUNCATE , src_lang=None , tgt_lang=None ):
        if getattr(self.tokenizer , """_build_translation_inputs""" , None ):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation )
    def _sanitize_parameters( self , src_lang=None , tgt_lang=None , **kwargs ):
        preprocess_params , forward_params , postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params["""src_lang"""] = src_lang
        if tgt_lang is not None:
            preprocess_params["""tgt_lang"""] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("""task""" , self.task )
            items = task.split("""_""" )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params["""src_lang"""] = items[1]
                preprocess_params["""tgt_lang"""] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
| 684 | 0 |
'''simple docstring'''
from __future__ import annotations
seive = [True] * 1_0_0_0_0_0_1
i = 2
while i * i <= 1_0_0_0_0_0_0:
    if seive[i]:
        for j in range(i * i, 1_0_0_0_0_0_1, i):
            seive[j] = False
    i += 1
def is_prime(n: int ) -> bool:
    return seive[n]
def contains_an_even_digit(n: int ) -> bool:
    return any(digit in """02468""" for digit in str(n ) )
def find_circular_primes(limit: int = 1000000 ) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3 ,limit + 1 ,2 ):
        if is_prime(num ) and not contains_an_even_digit(num ):
            str_num = str(num )
            list_nums = [int(str_num[j:] + str_num[:j] ) for j in range(len(str_num ) )]
            if all(is_prime(i ) for i in list_nums ):
                result.append(num )
    return result
def solution() -> int:
    return len(find_circular_primes() )
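# Spot check: there are exactly 13 circular primes below 100
# (2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97), so
# len(find_circular_primes(100)) == 13.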
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
| 712 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines ) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" ,"""""" ,line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
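# Comments and blank lines never reach the hash, so these two hypothetical sources
# collide by design:
# _hash_python_lines(["x = 1  # init", "", "print(x)"]) == _hash_python_lines(["x = 1  ", "print(x)"])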
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 713 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path ,rembert_config_file ,pytorch_dump_path ):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file )
    print("""Building PyTorch model from configuration: {}""".format(str(config ) ) )
    model = RemBertModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model ,config ,tf_checkpoint_path )
    # Save pytorch-model
    print("""Save PyTorch model to {}""".format(pytorch_dump_path ) )
    torch.save(model.state_dict() ,pytorch_dump_path )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 0 |
from math import factorial
def combinations(n ,k ) -> int:
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
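# Doctest-style checks: combinations(5, 2) == 10 and combinations(52, 5) == 2598960,
# the number of five-card hands quoted below.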
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"""fifty-two card deck is: {combinations(5_2, 5)}\n""",
)
print(
'If a class of 40 students must be arranged into groups of',
f"""4 for group projects, there are {combinations(4_0, 4)} ways""",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"""are {combinations(1_0, 3)} ways that first, second and""",
'third place can be awarded.',
)
| 714 |
from ..utils import DummyObject, requires_backends
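# Every placeholder class below follows the same recipe: the DummyObject metaclass
# turns attribute access into a requires_backends() check, so the import always
# succeeds but actually using the class raises a clear "install flax" error.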
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""flax"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""flax"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""flax"""] )
| 684 | 0 |
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 715 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def primitive_root(p_val ) -> int:
    print("""Generating primitive root of p""" )
    while True:
        g = random.randrange(3 ,p_val )
        if pow(g ,2 ,p_val ) == 1:
            continue
        if pow(g ,p_val ,p_val ) == 1:
            continue
        return g
def generate_key(key_size ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("""Generating prime p...""" )
    p = rabin_miller.generate_large_prime(key_size )  # select large prime number.
    e_1 = primitive_root(p )  # one primitive root on modulo p.
    d = random.randrange(3 ,p )  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1 ,d ,p ) ,p )
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name ,key_size ) -> None:
    if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
        print("""\nWARNING:""" )
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            """Use a different name or delete these files and re-run this program.""" )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
    print(f"""Writing private key to file {name}_privkey.txt...""" )
    with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""" )
def main() -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCamelCase : str = logging.get_logger(__name__)
class MobileViTImageProcessor(BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_rescale = True , rescale_factor = 1 / 2_5_5 , do_center_crop = True , crop_size = None , do_flip_channel_order = True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""shortest_edge""": 2_2_4}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}""" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order( self , image , data_format = None ) -> np.ndarray:
        return flip_channel_order(image , data_format=data_format )
    def preprocess( self , images , do_resize = None , size = None , resample = None , do_rescale = None , rescale_factor = None , do_center_crop = None , crop_size = None , do_flip_channel_order = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self , outputs , target_sizes = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    """Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(target_sizes ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
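# Usage sketch (input values are illustrative): preprocess() returns a BatchFeature
# whose "pixel_values" have been resized, center-cropped, rescaled and flipped to
# BGR, and post_process_semantic_segmentation() maps model logits back to per-pixel
# class ids at the requested target sizes.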
| 716 |
def _modexpt(base ,exponent ,modulo_value ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base ,exponent // 2 ,modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base ,exponent - 1 ,modulo_value )) % modulo_value
def solution(base = 1777 ,height = 1855 ,digits = 8 ) -> int:
    result = base
    for _ in range(1 ,height ):
        result = _modexpt(base ,result ,10**digits )
    return result
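# _modexpt(2, 10, 1000) == 24 because 2**10 = 1024 and only the last three digits
# survive. solution() applies this repeatedly to get the last `digits` digits of the
# power tower base**base**...**base of height `height` (Project Euler 188).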
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """simple docstring"""
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
    data_dir: str = field(
        metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    def __post_init__( self ):
        self.task_name = self.task_name.lower()
class Split(Enum ):
    """simple docstring"""
    train = """train"""
    dev = """dev"""
    test = """test"""
class GlueDataset(Dataset ):
    """simple docstring"""
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , cache_dir = None , ):
        warnings.warn(
            """This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
            """library. You can have a look at this example script for pointers: """
            """https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1] , label_list[2] = label_list[2] , label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
            else:
                logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
    def __len__( self ):
        return len(self.features )
    def __getitem__( self , i ) -> InputFeatures:
        return self.features[i]
    def get_labels( self ):
        return self.label_list
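# Typical wiring (paths and task are placeholders):
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue/MRPC")
#   dataset = GlueDataset(args, tokenizer=tokenizer, mode="dev")
#   first_features = dataset[0]  # an InputFeatures instance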
| 717 |
from itertools import product
def total_frequency_distribution(sides_number ,dice_number ) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number ,max_face_number + 1 )
    for dice_numbers in product(face_numbers ,repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 ,dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 ,dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total ,max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability ,ndigits=7 )
    return rounded_peter_win_probability
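# For the record, the rounded probability returned here is Project Euler 205's
# published answer, 0.5731441.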
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vit_msn'] = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
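    # The sys.modules swap below is the standard lazy-import trick: the package is
    # nearly free to import, and the real submodules load only when an attribute
    # such as ViTMSNModel is first accessed.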
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ["""torch""", """torchsde"""]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""torch""", """torchsde"""] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """torchsde"""] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ["""torch""", """torchsde"""] )
| 719 |
import os
def solution() -> int:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 ,20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 684 | 0 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = """detr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=1_0_0 , encoder_layers=6 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads( self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size( self ) -> int:
        return self.d_model
    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        return cls(backbone_config=backbone_config , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["""backbone_config"""] = self.backbone_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
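# A default instance round-trips through to_dict(): DetrConfig() yields d_model=256
# with a timm ResNet-50 backbone unless use_timm_backbone is disabled.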
class DetrOnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("""1.11""" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )
    @property
    def atol_for_validation( self ) -> float:
        return 1e-5
    @property
    def default_onnx_opset( self ) -> int:
        return 1_2
| 720 |
def cocktail_shaker_sort(unsorted ) -> list:
    for i in range(len(unsorted ) - 1 ,0 ,-1 ):
        swapped = False
        for j in range(i ,0 ,-1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1] , unsorted[j] = unsorted[j] , unsorted[j - 1]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1] , unsorted[j] = unsorted[j + 1 - 1] , unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted
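# e.g. cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]; the alternating
# passes let an already-sorted input exit after a single sweep.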
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 0 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_3 , A=7 , A=True , A=True , A=True , A=True , A=True , A=False , A=False , A=False , A=2 , A=9_9 , A=0 , A=3_2 , A=5 , A=4 , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=2 , A=4 , A="last" , A=True , A=None , A=0 , ) -> Optional[int]:
snake_case : List[str] = parent
snake_case : Tuple = batch_size
snake_case : Union[str, Any] = seq_length
snake_case : Dict = is_training
snake_case : Dict = use_input_lengths
snake_case : List[str] = use_token_type_ids
snake_case : Dict = use_labels
snake_case : int = gelu_activation
snake_case : List[str] = sinusoidal_embeddings
snake_case : Tuple = causal
snake_case : Tuple = asm
snake_case : List[Any] = n_langs
snake_case : List[Any] = vocab_size
snake_case : List[Any] = n_special
snake_case : Dict = hidden_size
snake_case : str = num_hidden_layers
snake_case : Optional[Any] = num_attention_heads
snake_case : Dict = hidden_dropout_prob
snake_case : Optional[Any] = attention_probs_dropout_prob
snake_case : str = max_position_embeddings
snake_case : List[Any] = type_sequence_label_size
snake_case : Optional[int] = initializer_range
snake_case : Optional[Any] = num_labels
snake_case : str = num_choices
snake_case : Tuple = summary_type
snake_case : Tuple = use_proj
snake_case : List[Any] = scope
snake_case : int = bos_token_id
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : Union[str, Any] = None
if self.use_input_lengths:
snake_case : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
snake_case : Optional[int] = None
if self.use_token_type_ids:
snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
snake_case : Optional[int] = None
snake_case : str = None
snake_case : Optional[Any] = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : Optional[Any] = ids_tensor([self.batch_size] , 2 ).float()
snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
snake_case : Optional[int] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCAmelCase ( self ) -> Optional[int]:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> List[str]:
snake_case : Tuple = XLMModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : List[str] = model(snake_case_ , lengths=snake_case_ , langs=snake_case_ )
snake_case : Optional[Any] = model(snake_case_ , langs=snake_case_ )
snake_case : str = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Union[str, Any]:
snake_case : Optional[Any] = XLMWithLMHeadModel(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : int = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Any:
snake_case : List[Any] = XLMForQuestionAnsweringSimple(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : Optional[int] = model(snake_case_ )
snake_case : Tuple = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
snake_case : Union[str, Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Any:
snake_case : Union[str, Any] = XLMForQuestionAnswering(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : int = model(snake_case_ )
snake_case : List[Any] = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , p_mask=snake_case_ , )
snake_case : Optional[Any] = model(
snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , )
        (total_loss,) = result_with_labels.to_tuple()
snake_case : Optional[Any] = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ )
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> int:
snake_case : Union[str, Any] = XLMForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : Optional[int] = model(snake_case_ )
snake_case : Union[str, Any] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[int]:
snake_case : List[str] = self.num_labels
snake_case : Union[str, Any] = XLMForTokenClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , A , A , A , A , A , A , A , A , A , ) -> Optional[int]:
snake_case : List[str] = self.num_choices
snake_case : Union[str, Any] = XLMForMultipleChoice(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case : List[Any] = model(
snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class __lowercase (_snake_case , _snake_case , _snake_case , unittest.TestCase ):
"""simple docstring"""
_snake_case = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_snake_case = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_snake_case = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase ( self , A , A , A , A , A ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCAmelCase ( self , A , A , A=False ) -> int:
snake_case : Optional[Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
snake_case : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
snake_case : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case_ )
return inputs_dict
def UpperCAmelCase ( self ) -> Tuple:
snake_case : int = XLMModelTester(self )
snake_case : Any = ConfigTester(self , config_class=snake_case_ , emb_dim=3_7 )
def UpperCAmelCase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*snake_case_ )
def UpperCAmelCase ( self ) -> int:
snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*snake_case_ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*snake_case_ )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*snake_case_ )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*snake_case_ )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*snake_case_ )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case_ )
def UpperCAmelCase ( self , A , A , A , A , A , A=False , A=1 ) -> List[Any]:
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_attentions in attentions] , [True] * len(snake_case_ ) )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(snake_case_ ):
# adds PAD dummy token
snake_case : Optional[Any] = min_length + idx + 1
snake_case : Optional[Any] = min_length + idx + 1
snake_case : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(snake_case_ ) )
def UpperCAmelCase ( self , A , A , A , A , A , A=False , A=1 ) -> List[Any]:
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertListEqual(
[isinstance(snake_case_ , snake_case_ ) for iter_hidden_states in hidden_states] , [True] * len(snake_case_ ) , )
self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(snake_case_ ):
# adds PAD dummy token
snake_case : Dict = min_length + idx + 1
snake_case : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case_ ) , )
@slow
def UpperCAmelCase ( self ) -> List[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Any = XLMModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Optional[Any] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(snake_case_ )
snake_case : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=snake_case_ ) # the president
snake_case : List[Any] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
snake_case : Any = model.generate(snake_case_ , do_sample=snake_case_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case_ )
| 721 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
        oov = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder ) == 7_9:
            oov = oov.replace(r"""\-'""" , r"""\-+'""" )
        snake_case : Optional[Any] = regex.compile(oov )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
        return {**self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder}
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
        accepted = (
            [chr(i ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
            + [chr(i ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
            + [chr(i ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
            + ["""."""]
        )
        accepted = frozenset(accepted )
        pattern = re.compile(r"""_+""" )
        text = """""".join([c if c in accepted else """_""" for c in A.lower()] )
        text = pattern.sub("""_""" , text ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
| 684 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : str = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __lowercase (lowercase__ ):
"""simple docstring"""
_snake_case = """big_bird"""
def __init__( self , A=5_0_3_5_8 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu_new" , A=0.1 , A=0.1 , A=4_0_9_6 , A=2 , A=0.02 , A=1e-1_2 , A=True , A=0 , A=1 , A=2 , A=6_6 , A="block_sparse" , A=True , A=False , A=6_4 , A=3 , A=None , **A , ) -> Optional[int]:
super().__init__(
pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , sep_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , )
snake_case : str = vocab_size
snake_case : Tuple = max_position_embeddings
snake_case : Dict = hidden_size
snake_case : Dict = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : List[str] = intermediate_size
snake_case : Optional[int] = hidden_act
snake_case : Optional[int] = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : List[str] = initializer_range
snake_case : Optional[int] = type_vocab_size
snake_case : int = layer_norm_eps
snake_case : Dict = use_cache
snake_case : Dict = rescale_embeddings
snake_case : List[Any] = attention_type
snake_case : Optional[int] = use_bias
snake_case : str = block_size
snake_case : Dict = num_random_blocks
snake_case : List[Any] = classifier_dropout
class __lowercase (lowercase__ ):
"""simple docstring"""
@property
def UpperCAmelCase ( self ) -> List[Any]:
if self.task == "multiple-choice":
snake_case : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
snake_case : List[Any] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 700 |
def naive_pattern_search(s: str , pattern: str ) -> list:
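    """Naive string matching: return every index in ``s`` where ``pattern`` begins.

    Runs in O(len(s) * len(pattern)) time. Doctest added as an illustration:

    >>> naive_pattern_search('ABCDEFG', 'DE')
    [3]
    """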
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase = {
'configuration_poolformer': [
'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'PoolFormerConfig',
'PoolFormerOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['PoolFormerFeatureExtractor']
lowerCamelCase = ['PoolFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PoolFormerForImageClassification',
'PoolFormerModel',
'PoolFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 701 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( vector: np.ndarray ) -> np.ndarray:
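    """Hyperbolic tangent via the logistic identity tanh(x) = 2 / (1 + e^(-2x)) - 1.

    Illustrative doctest (checks the identity against ``np.tanh``):

    >>> vec = np.array([-1.0, 0.0, 1.0])
    >>> bool(np.allclose(SCREAMING_SNAKE_CASE__(vec), np.tanh(vec)))
    True
    """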
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class __lowercase (lowerCamelCase__ ):
"""simple docstring"""
_snake_case = """blip_text_model"""
def __init__( self , A=3_0_5_2_4 , A=7_6_8 , A=7_6_8 , A=3_0_7_2 , A=7_6_8 , A=1_2 , A=8 , A=5_1_2 , A="gelu" , A=1e-1_2 , A=0.0 , A=0.0 , A=0.02 , A=3_0_5_2_2 , A=2 , A=0 , A=1_0_2 , A=True , A=True , **A , ) -> Dict:
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , sep_token_id=__lowerCamelCase , **__lowerCamelCase , )
snake_case : Any = vocab_size
snake_case : List[Any] = hidden_size
snake_case : Dict = encoder_hidden_size
snake_case : List[str] = intermediate_size
snake_case : List[Any] = projection_dim
snake_case : Any = hidden_dropout_prob
snake_case : Optional[int] = num_hidden_layers
snake_case : List[Any] = num_attention_heads
snake_case : Optional[int] = max_position_embeddings
snake_case : List[Any] = layer_norm_eps
snake_case : Union[str, Any] = hidden_act
snake_case : Optional[int] = initializer_range
snake_case : Tuple = attention_probs_dropout_prob
snake_case : List[str] = is_decoder
snake_case : List[Any] = use_cache
@classmethod
def UpperCAmelCase ( cls , A , **A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowerCamelCase )
snake_case : List[Any] = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the text config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
snake_case : Union[str, Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class __lowercase (lowerCamelCase__ ):
"""simple docstring"""
_snake_case = """blip_vision_model"""
def __init__( self , A=7_6_8 , A=3_0_7_2 , A=5_1_2 , A=1_2 , A=1_2 , A=3_8_4 , A=1_6 , A="gelu" , A=1e-5 , A=0.0 , A=1e-1_0 , **A , ) -> List[Any]:
super().__init__(**__lowerCamelCase )
snake_case : List[Any] = hidden_size
snake_case : Optional[int] = intermediate_size
snake_case : Tuple = projection_dim
snake_case : Any = num_hidden_layers
snake_case : Union[str, Any] = num_attention_heads
snake_case : List[Any] = patch_size
snake_case : Optional[int] = image_size
snake_case : Optional[Any] = initializer_range
snake_case : Optional[Any] = attention_dropout
snake_case : str = layer_norm_eps
snake_case : Tuple = hidden_act
@classmethod
def UpperCAmelCase ( cls , A , **A ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__lowerCamelCase )
snake_case : str = cls.get_config_dict(__lowerCamelCase , **__lowerCamelCase )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get("""model_type""" ) == "blip":
snake_case : List[str] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__lowerCamelCase , **__lowerCamelCase )
class __lowercase (lowerCamelCase__ ):
"""simple docstring"""
_snake_case = """blip"""
_snake_case = True
def __init__( self , A=None , A=None , A=5_1_2 , A=2.65_92 , A=2_5_6 , **A , ) -> Dict:
super().__init__(**__lowerCamelCase )
if text_config is None:
snake_case : Tuple = {}
logger.info("""`text_config` is `None`. Initializing the `BlipTextConfig` with default values.""" )
if vision_config is None:
snake_case : Dict = {}
logger.info("""`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.""" )
snake_case : int = BlipTextConfig(**__lowerCamelCase )
snake_case : Optional[Any] = BlipVisionConfig(**__lowerCamelCase )
snake_case : List[str] = self.vision_config.hidden_size
snake_case : List[Any] = projection_dim
snake_case : Optional[Any] = logit_scale_init_value
snake_case : Union[str, Any] = 1.0
snake_case : Optional[Any] = 0.02
snake_case : int = image_text_hidden_size
@classmethod
def UpperCAmelCase ( cls , A , A , **A ) -> Optional[int]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__lowerCamelCase )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Optional[Any] = copy.deepcopy(self.__dict__ )
snake_case : Tuple = self.text_config.to_dict()
snake_case : Union[str, Any] = self.vision_config.to_dict()
snake_case : str = self.__class__.model_type
return output
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 684 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Dict = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[int] = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
lowerCamelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=1_4 , A=7 , A=True , A=True , A=True , A=True , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> Union[str, Any]:
snake_case : str = parent
snake_case : Optional[Any] = batch_size
snake_case : Optional[int] = seq_length
snake_case : Any = is_training
snake_case : List[str] = use_token_type_ids
snake_case : Any = use_input_mask
snake_case : Any = use_labels
snake_case : Tuple = use_mc_token_ids
snake_case : Union[str, Any] = vocab_size
snake_case : int = hidden_size
snake_case : Any = num_hidden_layers
snake_case : Union[str, Any] = num_attention_heads
snake_case : List[Any] = intermediate_size
snake_case : List[str] = hidden_act
snake_case : Tuple = hidden_dropout_prob
snake_case : str = attention_probs_dropout_prob
snake_case : str = max_position_embeddings
snake_case : int = type_vocab_size
snake_case : int = type_sequence_label_size
snake_case : Optional[Any] = initializer_range
snake_case : Optional[int] = num_labels
snake_case : List[Any] = num_choices
snake_case : List[Any] = scope
snake_case : int = self.vocab_size - 1
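        # the last vocabulary index doubles as pad_token_id in get_config() below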
def UpperCAmelCase ( self ) -> Any:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase ( self ) -> Any:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def UpperCAmelCase ( self , A , A , A , A , A , *A ) -> Union[str, Any]:
snake_case : List[str] = CTRLModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ )
model(lowercase_ , token_type_ids=lowercase_ )
snake_case : Any = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def UpperCAmelCase ( self , A , A , A , A , A , *A ) -> Tuple:
snake_case : Tuple = CTRLLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case : int = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict
def UpperCAmelCase ( self , A , A , A , A , *A ) -> Union[str, Any]:
snake_case : int = self.num_labels
snake_case : Optional[int] = CTRLForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
snake_case : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __lowercase (snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
_snake_case = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
_snake_case = (CTRLLMHeadModel,) if is_torch_available() else ()
_snake_case = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self , A , A , A , A , A ) -> Optional[int]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def UpperCAmelCase ( self ) -> int:
snake_case : List[str] = CTRLModelTester(self )
snake_case : str = ConfigTester(self , config_class=lowercase_ , n_embd=3_7 )
def UpperCAmelCase ( self ) -> Any:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> str:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowercase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowercase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase ( self ) -> List[Any]:
pass
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Dict = CTRLModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Tuple:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def UpperCAmelCase ( self ) -> Tuple:
snake_case : str = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(lowercase_ )
snake_case : Union[str, Any] = torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=lowercase_ ) # Legal the president is
snake_case : Tuple = [
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
snake_case : Optional[Any] = model.generate(lowercase_ , do_sample=lowercase_ )
self.assertListEqual(output_ids[0].tolist() , lowercase_ )
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="<s>" , A="</s>" , A="<unk>" , A="<pad>" , A = None , **A , ) -> None:
snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
snake_case : Tuple = vocab_file
snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
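        # spm_char.model is a character-level SentencePiece model, so each piece is a single character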
@property
def UpperCAmelCase ( self ) -> List[Any]:
return self.sp_model.get_piece_size()
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : Any = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> List[str]:
snake_case : Optional[Any] = self.__dict__.copy()
snake_case : Optional[Any] = None
return state
def __setstate__( self , A ) -> Tuple:
snake_case : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case : List[Any] = {}
snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase ( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def UpperCAmelCase ( self , A ) -> Tuple:
return self.sp_model.piece_to_id(A )
def UpperCAmelCase ( self , A ) -> int:
snake_case : Union[str, Any] = self.sp_model.IdToPiece(A )
return token
def UpperCAmelCase ( self , A ) -> Tuple:
snake_case : Optional[int] = []
snake_case : str = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
snake_case : Dict = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def UpperCAmelCase ( self , A , A=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
snake_case : Any = [1]
if token_ids_a is None:
return ([0] * len(A )) + suffix_ones
return ([0] * len(A )) + ([0] * len(A )) + suffix_ones
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Optional[Any] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
snake_case : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
| 684 | 0 |
g = 9.8_0665  # standard gravity in m/s^2; must be named `g` because the default argument below references it
def SCREAMING_SNAKE_CASE__ ( fluid_density: float , volume: float , gravity: float = g ) -> float:
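    """Archimedes' principle: buoyant force = fluid_density * gravity * displaced volume, in newtons.

    Illustrative doctests (example values chosen for this sketch):

    >>> SCREAMING_SNAKE_CASE__(500, 4, 9.8)
    19600.0
    >>> SCREAMING_SNAKE_CASE__(0, 4)
    Traceback (most recent call last):
        ...
    ValueError: Impossible fluid density
    """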
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__( self , A=3_2_0_0_0 , A=2_5_6_0 , A=3_2 , A=3_2 , A=4 , A="gelu" , A=1.00 , A=1_0_0_0_0 , A=2_0_4_8 , A=0.02 , A=1e-5 , A=True , A=3_1_9_9_6 , A=3_1_9_9_9 , A=0.1 , A=0.0 , **A , ) -> str:
super().__init__(bos_token_id=A , eos_token_id=A , **A )
snake_case : Optional[Any] = vocab_size
snake_case : Optional[Any] = max_position_embeddings
snake_case : Union[str, Any] = hidden_size
snake_case : Union[str, Any] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Optional[int] = intermediate_multiple_size
snake_case : int = hidden_act
snake_case : str = rotary_pct
snake_case : Optional[Any] = rotary_emb_base
snake_case : Any = initializer_range
snake_case : Any = layer_norm_eps
snake_case : Optional[Any] = use_cache
snake_case : Tuple = attention_dropout
snake_case : Tuple = hidden_dropout
| 684 | 0 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ) -> list:
    """Create a nested list of random floats with the given 2-D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , A , A=7 , A=4_0_0 , A=2_0_0_0 , A=1 , A=0.0 , A=1_6_0_0_0 , A=True , A=True , ) -> Dict:
snake_case : Any = parent
snake_case : Optional[int] = batch_size
snake_case : int = min_seq_length
snake_case : Tuple = max_seq_length
snake_case : List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
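        # length step between consecutive batch items, so generated inputs strictly increase in size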
snake_case : List[str] = feature_size
snake_case : Optional[Any] = padding_value
snake_case : List[str] = sampling_rate
snake_case : Optional[int] = return_attention_mask
snake_case : Dict = do_normalize
def UpperCAmelCase ( self ) -> Dict:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def UpperCAmelCase ( self , equal_length=False , numpify=False ) -> List[Any]:
        def _flatten(A ):
            return list(itertools.chain(*A ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
class __lowercase (__lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case = WavaVecaFeatureExtractor
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : str = WavaVecaFeatureExtractionTester(self )
def UpperCAmelCase ( self , A ) -> str:
self.assertTrue(np.all(np.mean(A , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(A , axis=0 ) - 1 ) < 1e-3 ) )
def UpperCAmelCase ( self ) -> int:
# Tests that all call wrap to encode_plus and batch_encode_plus
snake_case : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
snake_case : int = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : Tuple = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
snake_case : int = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
snake_case : Dict = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test batched
snake_case : Union[str, Any] = feat_extract(A , return_tensors="""np""" ).input_values
snake_case : Optional[Any] = feat_extract(A , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
snake_case : List[str] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
snake_case : Tuple = np.asarray(A )
snake_case : List[Any] = feat_extract(A , return_tensors="""np""" ).input_values
snake_case : Union[str, Any] = feat_extract(A , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(A , A ):
self.assertTrue(np.allclose(A , A , atol=1e-3 ) )
def UpperCAmelCase ( self ) -> int:
snake_case : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : Dict = ["longest", "max_length", "do_not_pad"]
snake_case : List[str] = [None, 1_6_0_0, None]
for max_length, padding in zip(A , A ):
snake_case : str = feat_extract(A , padding=A , max_length=A , return_tensors="""np""" )
snake_case : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def UpperCAmelCase ( self ) -> Any:
snake_case : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Any = range(8_0_0 , 1_4_0_0 , 2_0_0 )
snake_case : int = [floats_list((1, x) )[0] for x in lengths]
snake_case : Any = ["longest", "max_length", "do_not_pad"]
snake_case : Union[str, Any] = [None, 1_6_0_0, None]
for max_length, padding in zip(A , A ):
snake_case : Union[str, Any] = feat_extract(A , max_length=A , padding=A )
snake_case : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : Dict = feat_extract(
A , truncation=A , max_length=1_0_0_0 , padding="""max_length""" , return_tensors="""np""" )
snake_case : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCAmelCase ( self ) -> str:
snake_case : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Tuple = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : List[str] = feat_extract(
A , truncation=A , max_length=1_0_0_0 , padding="""longest""" , return_tensors="""np""" )
snake_case : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
snake_case : Optional[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
snake_case : Any = feat_extract(
A , truncation=A , max_length=2_0_0_0 , padding="""longest""" , return_tensors="""np""" )
snake_case : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
@require_torch
def UpperCAmelCase ( self ) -> Union[str, Any]:
import torch
snake_case : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
snake_case : Optional[Any] = np.random.rand(1_0_0 ).astype(np.floataa )
snake_case : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
snake_case : Optional[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
snake_case : Dict = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def UpperCAmelCase ( self ) -> Any:
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
snake_case : List[Any] = WavaVecaConfig.from_pretrained(A )
snake_case : Tuple = WavaVecaFeatureExtractor.from_pretrained(A )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
| 706 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
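    """Convert a hexadecimal string to its binary representation, returned as an int.

    Doctest examples (illustrative values, exercised by the testmod() call below):
    >>> SCREAMING_SNAKE_CASE__("AC")
    10101100
    >>> SCREAMING_SNAKE_CASE__("-ac")
    -10101100
    """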
snake_case : Optional[Any] = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
snake_case : Any = hex_num[0] == """-"""
if is_negative:
snake_case : int = hex_num[1:]
try:
snake_case : List[Any] = int(lowercase ,16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
snake_case : Dict = """"""
while int_num > 0:
snake_case : Dict = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowerCamelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , **A ) -> List[Any]:
super().__init__(**__UpperCamelCase )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
# No specific FOR_XXX available yet
def __call__( self , A , **A ) -> List[str]:
return super().__call__(__UpperCamelCase , **__UpperCamelCase )
def UpperCAmelCase ( self , **A ) -> Union[str, Any]:
snake_case : int = {}
if "candidate_labels" in kwargs:
snake_case : int = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
snake_case : Tuple = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase ( self , A , A=None , A="This is a sound of {}." ) -> int:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
snake_case : Dict = requests.get(__UpperCamelCase ).content
else:
with open(__UpperCamelCase , """rb""" ) as f:
snake_case : Any = f.read()
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case : int = ffmpeg_read(__UpperCamelCase , self.feature_extractor.sampling_rate )
if not isinstance(__UpperCamelCase , np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
snake_case : Dict = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
snake_case : List[str] = candidate_labels
snake_case : Any = [hypothesis_template.format(__UpperCamelCase ) for x in candidate_labels]
snake_case : Optional[int] = self.tokenizer(__UpperCamelCase , return_tensors=self.framework , padding=__UpperCamelCase )
snake_case : List[Any] = [text_inputs]
return inputs
def UpperCAmelCase ( self , A ) -> Any:
snake_case : Optional[Any] = model_inputs.pop("""candidate_labels""" )
snake_case : List[str] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , __UpperCamelCase ):
snake_case : Optional[Any] = text_inputs[0]
else:
# Batching case.
snake_case : Any = text_inputs[0][0]
snake_case : Optional[Any] = self.model(**__UpperCamelCase , **__UpperCamelCase )
snake_case : Dict = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : Dict = model_outputs.pop("""candidate_labels""" )
snake_case : List[str] = model_outputs["""logits"""][0]
if self.framework == "pt":
snake_case : str = logits.softmax(dim=0 )
snake_case : Union[str, Any] = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
snake_case : Any = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(__UpperCamelCase , __UpperCamelCase ) , key=lambda A : -A[0] )
]
return result
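# Usage sketch for the pipeline above (a minimal example; the model id, file name and candidate
# labels are illustrative assumptions, not taken from this file):
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])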
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__( self , A = True , A = None , A = PIL.Image.BICUBIC , A = True , A = None , A = 1 / 2_5_5 , A = True , A = True , A = None , A = None , **A , ) -> None:
super().__init__(**A )
snake_case : int = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
snake_case : int = get_size_dict(A )
snake_case : Optional[Any] = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
snake_case : Dict = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = do_resize
snake_case : str = size
snake_case : Tuple = resample
snake_case : Any = do_center_crop
snake_case : Tuple = crop_size
snake_case : int = do_rescale
snake_case : Dict = rescale_factor
snake_case : Union[str, Any] = do_normalize
snake_case : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case : Optional[Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def UpperCAmelCase ( self , A , A , A = PIL.Image.BICUBIC , A = None , **A , ) -> np.ndarray:
snake_case : Dict = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
A , size=(size["""height"""], size["""width"""]) , resample=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray:
snake_case : Any = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(A , size=(size["""height"""], size["""width"""]) , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A = None , **A , ) -> Tuple:
return rescale(A , scale=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray:
return normalize(A , mean=A , std=A , data_format=A , **A )
def UpperCAmelCase ( self , A , A = None , A = None , A=None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image:
snake_case : str = do_resize if do_resize is not None else self.do_resize
snake_case : Dict = resample if resample is not None else self.resample
snake_case : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case : Tuple = do_rescale if do_rescale is not None else self.do_rescale
snake_case : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case : int = image_mean if image_mean is not None else self.image_mean
snake_case : List[str] = image_std if image_std is not None else self.image_std
snake_case : Dict = size if size is not None else self.size
snake_case : Tuple = get_size_dict(A )
snake_case : Dict = crop_size if crop_size is not None else self.crop_size
snake_case : List[str] = get_size_dict(A , param_name="""crop_size""" )
snake_case : int = make_list_of_images(A )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case : Optional[Any] = [to_numpy_array(A ) for image in images]
if do_resize:
snake_case : Dict = [self.resize(image=A , size=A , resample=A ) for image in images]
if do_center_crop:
snake_case : List[str] = [self.center_crop(image=A , size=A ) for image in images]
if do_rescale:
snake_case : List[str] = [self.rescale(image=A , scale=A ) for image in images]
if do_normalize:
snake_case : str = [self.normalize(image=A , mean=A , std=A ) for image in images]
snake_case : Union[str, Any] = [to_channel_dimension_format(A , A ) for image in images]
snake_case : List[Any] = {"""pixel_values""": images}
return BatchFeature(data=A , tensor_type=A )
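# Usage sketch for the image processor above (illustrative; assumes a PIL image `image` is
# available in scope):
#
#     image_processor = __lowercase()                      # defaults: resize to 256, center crop to 224
#     batch = image_processor(images=image, return_tensors="np")
#     batch["pixel_values"].shape                          # -> (1, 3, 224, 224) for an RGB input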
| 684 | 0 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
lowerCamelCase : Union[str, Any] = '.'
if __name__ == "__main__":
lowerCamelCase : int = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
lowerCamelCase : Optional[Any] = []
lowerCamelCase : str = []
with open(doctest_file_path) as fp:
for line in fp:
lowerCamelCase : Optional[Any] = line.strip()
lowerCamelCase : int = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
lowerCamelCase : Optional[int] = '\n'.join(non_existent_paths)
raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
if all_paths != sorted(all_paths):
raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
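# Illustrative excerpt of `utils/documentation_tests.txt` (example paths, not from this check):
#
#     docs/source/en/quicktour.md
#     src/transformers/models/bert/modeling_bert.py
#
# Every entry must exist on disk and the file must stay alphabetically sorted, or the checks above raise.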
| 708 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[Any]:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self ) -> Tuple:
import diffusers
from diffusers.dependency_versions_table import deps
snake_case : List[str] = inspect.getmembers(A , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
snake_case : Tuple = """k-diffusion"""
elif backend == "invisible_watermark":
snake_case : Optional[int] = """invisible-watermark"""
assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 0 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" ,[None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" ,["""default""", 0, 100 * 2**20, 900 * 2**20] )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config ,"""IN_MEMORY_MAX_SIZE""" ,lowerCAmelCase__ )
snake_case : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
snake_case : List[str] = dataset_size < in_memory_max_size
else:
snake_case : Union[str, Any] = False
snake_case : List[str] = is_small_dataset(lowerCAmelCase__ )
assert result == expected
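# For reference, a minimal sketch of the behaviour this test pins down (an assumption about the
# implementation, not a copy of it):
#
#     def is_small_dataset(dataset_size):
#         if dataset_size and datasets.config.IN_MEMORY_MAX_SIZE:
#             return dataset_size < datasets.config.IN_MEMORY_MAX_SIZE
#         return False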
| 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> int:
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> Optional[Any]:
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def UpperCAmelCase ( self , A ) -> int:
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_tf
def UpperCAmelCase ( self ) -> str:
self.assertEqual(find_labels(A ) , ["""labels"""] )
self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , ["""labels"""] )
@require_flax
def UpperCAmelCase ( self ) -> Any:
# Flax models don't have labels
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
self.assertEqual(find_labels(A ) , [] )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
pass
self.assertEqual(find_labels(A ) , [] )
| 684 | 0 |
from manim import *
class __lowercase (a__ ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
snake_case : List[Any] = Rectangle(height=0.5 , width=0.5 )
snake_case : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case : List[Any] = [mem.copy() for i in range(6 )]
snake_case : str = [mem.copy() for i in range(6 )]
snake_case : List[str] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
snake_case : Optional[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
snake_case : Optional[int] = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
snake_case : Any = Text("""CPU""" , font_size=2_4 )
snake_case : Union[str, Any] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCamelCase_ )
snake_case : int = [mem.copy() for i in range(1 )]
snake_case : Tuple = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
snake_case : Optional[int] = Text("""GPU""" , font_size=2_4 )
snake_case : Tuple = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
gpu.align_to(lowerCamelCase_ , lowerCamelCase_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCamelCase_ )
snake_case : Optional[int] = [mem.copy() for i in range(6 )]
snake_case : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 )
snake_case : int = Text("""Model""" , font_size=2_4 )
snake_case : Any = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , )
snake_case : Optional[Any] = MarkupText(
f"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=2_4 , )
snake_case : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case : Tuple = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCamelCase_ , run_time=2.5 ) , Write(lowerCamelCase_ ) , Write(lowerCamelCase_ ) )
self.add(lowerCamelCase_ )
snake_case : List[Any] = []
snake_case : List[Any] = []
snake_case : List[Any] = []
for i, rect in enumerate(lowerCamelCase_ ):
snake_case : Optional[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.7 )
cpu_target.move_to(lowerCamelCase_ )
cpu_target.generate_target()
snake_case : Dict = 0.46 / 4
snake_case : Dict = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase_ , buff=0.0 )
cpu_targs.append(lowerCamelCase_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase_ ) )
second_animations.append(MoveToTarget(lowerCamelCase_ , run_time=1.5 ) )
self.play(*lowerCamelCase_ )
self.play(*lowerCamelCase_ )
self.wait()
| 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__( self , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.0 , A=0.0 , A=0.02 , A=1e-1_2 , A=1_6 , A=True , A=1_0 , A=1_0 , A=1_0_2_4 , A=1_2_8 , **A , ) -> int:
super().__init__(**A )
snake_case : Any = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : Any = num_attention_heads
snake_case : Dict = intermediate_size
snake_case : int = hidden_act
snake_case : int = hidden_dropout_prob
snake_case : Tuple = attention_probs_dropout_prob
snake_case : int = initializer_range
snake_case : int = layer_norm_eps
snake_case : Any = patch_size
snake_case : List[Any] = qkv_bias
snake_case : int = frequency_stride
snake_case : Any = time_stride
snake_case : Union[str, Any] = max_length
snake_case : Any = num_mel_bins
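# Usage sketch (instantiating the configuration with the defaults declared above; illustrative):
#
#     config = __lowercase()
#     (config.hidden_size, config.num_mel_bins, config.max_length)  # -> (768, 128, 1024)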
| 684 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self , A , A ) -> List[Any]:
return f"""gaussian_noise_s={seed}_shape={"_".join([str(UpperCamelCase__ ) for s in shape] )}.npy"""
def UpperCAmelCase ( self ) -> Tuple:
super().tearDown()
gc.collect()
def UpperCAmelCase ( self , A=0 , A=(4, 4, 6_4, 6_4) , A=False ) -> Tuple:
snake_case : Union[str, Any] = jnp.bfloataa if fpaa else jnp.floataa
snake_case : List[str] = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return image
def UpperCAmelCase ( self , A=False , A="CompVis/stable-diffusion-v1-4" ) -> str:
snake_case : Tuple = jnp.bfloataa if fpaa else jnp.floataa
snake_case : Dict = """bf16""" if fpaa else None
snake_case , snake_case : str = FlaxUNetaDConditionModel.from_pretrained(
UpperCamelCase__ , subfolder="""unet""" , dtype=UpperCamelCase__ , revision=UpperCamelCase__ )
return model, params
def UpperCAmelCase ( self , A=0 , A=(4, 7_7, 7_6_8) , A=False ) -> str:
snake_case : List[Any] = jnp.bfloataa if fpaa else jnp.floataa
snake_case : List[str] = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[8_3, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[1_7, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1_0_0_0, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
] )
def UpperCAmelCase ( self , A , A , A ) -> str:
snake_case , snake_case : Any = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=UpperCamelCase__ )
snake_case : Tuple = self.get_latents(UpperCamelCase__ , fpaa=UpperCamelCase__ )
snake_case : Optional[int] = self.get_encoder_hidden_states(UpperCamelCase__ , fpaa=UpperCamelCase__ )
snake_case : Any = model.apply(
{"""params""": params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
snake_case : Union[str, Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case : Any = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[8_3, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[1_7, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1_0_0_0, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
] )
def UpperCAmelCase ( self , A , A , A ) -> str:
snake_case , snake_case : Dict = self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=UpperCamelCase__ )
snake_case : Union[str, Any] = self.get_latents(UpperCamelCase__ , shape=(4, 4, 9_6, 9_6) , fpaa=UpperCamelCase__ )
snake_case : Optional[Any] = self.get_encoder_hidden_states(UpperCamelCase__ , shape=(4, 7_7, 1_0_2_4) , fpaa=UpperCamelCase__ )
snake_case : Optional[int] = model.apply(
{"""params""": params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample
assert sample.shape == latents.shape
snake_case : Tuple = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
snake_case : List[str] = jnp.array(UpperCamelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
| 711 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__( self , *A , **A ) -> Optional[Any]:
super().__init__(*A , **A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCAmelCase ( self , A=None , A=None , A=None , A=None , A=None , A=None , **A , ) -> Optional[int]:
snake_case : Tuple = {}
if truncation is not None:
snake_case : Union[str, Any] = truncation
snake_case : Dict = generate_kwargs
snake_case : int = {}
if return_tensors is not None and return_type is None:
snake_case : List[Any] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
snake_case : List[str] = return_type
if clean_up_tokenization_spaces is not None:
snake_case : int = clean_up_tokenization_spaces
if stop_sequence is not None:
snake_case : Tuple = self.tokenizer.encode(A , add_special_tokens=A )
if len(A ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
return True
def UpperCAmelCase ( self , *A , A ) -> Tuple:
snake_case : Union[str, Any] = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
snake_case : Union[str, Any] = ([prefix + arg for arg in args[0]],)
snake_case : List[Any] = True
elif isinstance(args[0] , A ):
snake_case : str = (prefix + args[0],)
snake_case : str = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
snake_case : Optional[Any] = self.tokenizer(*A , padding=A , truncation=A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *A , **A ) -> Union[str, Any]:
snake_case : Tuple = super().__call__(*A , **A )
if (
isinstance(args[0] , A )
and all(isinstance(A , A ) for el in args[0] )
and all(len(A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCAmelCase ( self , A , A=TruncationStrategy.DO_NOT_TRUNCATE , **A ) -> str:
snake_case : Optional[Any] = self._parse_and_tokenize(A , truncation=A , **A )
return inputs
def UpperCAmelCase ( self , A , **A ) -> Tuple:
if self.framework == "pt":
snake_case , snake_case : List[str] = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
snake_case , snake_case : Optional[Any] = tf.shape(model_inputs["""input_ids"""] ).numpy()
snake_case : Dict = generate_kwargs.get("""min_length""" , self.model.config.min_length )
snake_case : str = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(A , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
snake_case : List[str] = self.model.generate(**A , **A )
snake_case : Dict = output_ids.shape[0]
if self.framework == "pt":
snake_case : List[Any] = output_ids.reshape(A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
snake_case : Any = tf.reshape(A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCAmelCase ( self , A , A=ReturnType.TEXT , A=False ) -> Union[str, Any]:
snake_case : Tuple = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
snake_case : Dict = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
snake_case : int = {
f"""{self.return_name}_text""": self.tokenizer.decode(
A , skip_special_tokens=A , clean_up_tokenization_spaces=A , )
}
records.append(A )
return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
def UpperCAmelCase ( self , A , A , A ) -> bool:
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def UpperCAmelCase ( self , *A , A=TruncationStrategy.DO_NOT_TRUNCATE , A=None , A=None ) -> Optional[int]:
if getattr(self.tokenizer , """_build_translation_inputs""" , A ):
return self.tokenizer._build_translation_inputs(
*A , return_tensors=self.framework , truncation=A , src_lang=A , tgt_lang=A )
else:
return super()._parse_and_tokenize(*A , truncation=A )
def UpperCAmelCase ( self , A=None , A=None , **A ) -> Union[str, Any]:
snake_case , snake_case , snake_case : str = super()._sanitize_parameters(**A )
if src_lang is not None:
snake_case : Tuple = src_lang
if tgt_lang is not None:
snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
snake_case : Union[str, Any] = kwargs.get("""task""" , self.task )
snake_case : Any = task.split("""_""" )
if task and len(A ) == 4:
# translation, XX, to YY
snake_case : Optional[Any] = items[1]
snake_case : Dict = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *A , **A ) -> str:
return super().__call__(*A , **A )
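# Usage sketch for the pipelines above (model ids are illustrative assumptions, not from this file):
#
#     from transformers import pipeline
#
#     summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#     summarizer("A long article ...", max_length=60)
#
#     translator = pipeline("translation_en_to_fr", model="t5-small")
#     translator("How old are you?")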
| 684 | 0 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
lowerCamelCase : int = "src/transformers"
# Matches is_xxx_available()
lowerCamelCase : Dict = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
lowerCamelCase : int = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowerCamelCase : Optional[Any] = re.compile(r'\s+\"\S*\":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
lowerCamelCase : List[str] = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
lowerCamelCase : Optional[Any] = re.compile(r'^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowerCamelCase : Optional[int] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
lowerCamelCase : Optional[int] = re.compile(r'^\s+\"([^\"]+)\",')
# Catches a line with objects between brackets only: ["foo", "bar"],
lowerCamelCase : List[str] = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
lowerCamelCase : str = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
lowerCamelCase : Union[str, Any] = re.compile(r'^\s*try:')
# Catches a line with else:
lowerCamelCase : Tuple = re.compile(r'^\s*else:')
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[int]:
if _re_test_backend.search(__lowerCAmelCase ) is None:
return None
snake_case : Dict = [b[0] for b in _re_backend.findall(__lowerCAmelCase )]
backends.sort()
return "_and_".join(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Optional[Any]:
with open(__lowerCAmelCase ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f:
snake_case : int = f.readlines()
snake_case : Union[str, Any] = 0
while line_index < len(__lowerCAmelCase ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(__lowerCAmelCase ):
return None
# First grab the objects without a specific backend in _import_structure
snake_case : Union[str, Any] = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
snake_case : Tuple = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(__lowerCAmelCase ):
snake_case : Any = _re_one_line_import_struct.search(__lowerCAmelCase ).groups()[0]
snake_case : Union[str, Any] = re.findall(R"""\[([^\]]+)\]""" ,__lowerCAmelCase )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
snake_case : Any = _re_import_struct_key_value.search(__lowerCAmelCase )
if single_line_import_search is not None:
snake_case : Tuple = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
snake_case : List[str] = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
snake_case : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case : Any = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case : Dict = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
snake_case : Optional[int] = lines[line_index]
if _re_import_struct_add_one.search(__lowerCAmelCase ) is not None:
objects.append(_re_import_struct_add_one.search(__lowerCAmelCase ).groups()[0] )
elif _re_import_struct_add_many.search(__lowerCAmelCase ) is not None:
snake_case : Optional[Any] = _re_import_struct_add_many.search(__lowerCAmelCase ).groups()[0].split(""", """ )
snake_case : Any = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif _re_between_brackets.search(__lowerCAmelCase ) is not None:
snake_case : Optional[Any] = _re_between_brackets.search(__lowerCAmelCase ).groups()[0].split(""", """ )
snake_case : Dict = [obj[1:-1] for obj in imports if len(__lowerCAmelCase ) > 0]
objects.extend(__lowerCAmelCase )
elif _re_quote_object.search(__lowerCAmelCase ) is not None:
objects.append(_re_quote_object.search(__lowerCAmelCase ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 12 + """\"""" ):
objects.append(line[13:-3] )
line_index += 1
snake_case : Union[str, Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
snake_case : Tuple = []
while (
line_index < len(__lowerCAmelCase )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
snake_case : Dict = lines[line_index]
snake_case : List[str] = _re_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
snake_case : Dict = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(__lowerCAmelCase ):
# If the line is an if is_backend_available, we grab all objects associated.
snake_case : int = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
snake_case : List[str] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
snake_case : Any = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
snake_case : Union[str, Any] = lines[line_index]
snake_case : Union[str, Any] = _re_import.search(__lowerCAmelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 12 ):
objects.append(line[12:-2] )
line_index += 1
snake_case : int = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Union[str, Any]:
def find_duplicates(lowercase ):
return [k for k, v in collections.Counter(__lowerCAmelCase ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
snake_case : Optional[int] = []
for key in import_dict_objects.keys():
snake_case : List[Any] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
snake_case : List[str] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
snake_case : Optional[int] = """base imports""" if key == """none""" else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
snake_case : Optional[Any] = []
for root, _, files in os.walk(__lowerCAmelCase ):
if "__init__.py" in files:
snake_case : Any = os.path.join(__lowerCAmelCase ,"""__init__.py""" )
snake_case : Optional[int] = parse_init(__lowerCAmelCase )
if objects is not None:
snake_case : Any = analyze_results(*__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case : int = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append("""\n""".join(__lowerCAmelCase ) )
if len(__lowerCAmelCase ) > 0:
raise ValueError("""\n\n""".join(__lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
snake_case : List[str] = []
for path, directories, files in os.walk(__lowerCAmelCase ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(__lowerCAmelCase )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(__lowerCAmelCase ) / folder).glob("""*.py""" ) ) ) == 0:
continue
snake_case : Any = str((Path(__lowerCAmelCase ) / folder).relative_to(__lowerCAmelCase ) )
snake_case : List[str] = short_path.replace(os.path.sep ,""".""" )
submodules.append(__lowerCAmelCase )
for fname in files:
if fname == "__init__.py":
continue
snake_case : Optional[int] = str((Path(__lowerCAmelCase ) / fname).relative_to(__lowerCAmelCase ) )
snake_case : Tuple = short_path.replace(""".py""" ,"""""" ).replace(os.path.sep ,""".""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(__lowerCAmelCase )
return submodules
lowerCamelCase : Optional[int] = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
from transformers.utils import direct_transformers_import
snake_case : Dict = direct_transformers_import(__lowerCAmelCase )
snake_case : List[str] = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and
    # (potentially re-)add them.
with open(os.path.join(__lowerCAmelCase ,"""__init__.py""" ) ,"""r""" ) as f:
snake_case : Tuple = f.read()
import_structure_keys.update(set(re.findall(R"""import_structure\[\"([^\"]*)\"\]""" ,__lowerCAmelCase ) ) )
snake_case : Optional[Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(__lowerCAmelCase ) > 0:
snake_case : List[Any] = """\n""".join(f"""- {module}""" for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registed in the main init of Transformers:\n"""
f"""{list_of_modules}\n"""
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 712 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> str:
snake_case : int = []
for line in lines:
snake_case : Dict = re.sub(R"""#.*""" ,"""""" ,lowercase ) # remove comments
if line:
filtered_lines.append(lowercase )
snake_case : Optional[int] = """\n""".join(lowercase )
# Make a hash from all this code
snake_case : List[str] = full_str.encode("""utf-8""" )
return shaaaa(lowercase ).hexdigest()
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowercase (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
_snake_case = DanceDiffusionPipeline
_snake_case = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_snake_case = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
_snake_case = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
snake_case : List[Any] = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=_UpperCAmelCase , use_timestep_embedding=_UpperCAmelCase , time_embedding_type="""fourier""" , mid_block_type="""UNetMidBlock1D""" , down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D""") , up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip""") , )
snake_case : int = IPNDMScheduler()
snake_case : Any = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCAmelCase ( self , A , A=0 ) -> Dict:
if str(_UpperCAmelCase ).startswith("""mps""" ):
snake_case : Dict = torch.manual_seed(_UpperCAmelCase )
else:
snake_case : Any = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
snake_case : Any = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
snake_case : Optional[Any] = self.get_dummy_components()
snake_case : Union[str, Any] = DanceDiffusionPipeline(**_UpperCAmelCase )
snake_case : int = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case : int = self.get_dummy_inputs(_UpperCAmelCase )
snake_case : Dict = pipe(**_UpperCAmelCase )
snake_case : List[Any] = output.audios
snake_case : int = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
snake_case : Optional[Any] = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCAmelCase ( self ) -> Any:
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase ( self ) -> Any:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def UpperCAmelCase ( self ) -> Union[str, Any]:
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase ( self ) -> Any:
return super().test_attention_slicing_forward_pass()
def UpperCAmelCase ( self ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> int:
snake_case : Tuple = torch_device
snake_case : Optional[Any] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" )
snake_case : List[str] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case : Tuple = torch.manual_seed(0 )
snake_case : Optional[int] = pipe(generator=_UpperCAmelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
snake_case : Any = output.audios
snake_case : List[str] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
snake_case : Dict = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ) -> Tuple:
snake_case : List[str] = torch_device
snake_case : int = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" , torch_dtype=torch.floataa )
snake_case : Tuple = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : Optional[Any] = pipe(generator=_UpperCAmelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
snake_case : Any = output.audios
snake_case : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
snake_case : str = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 713 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Tuple:
# Initialise PyTorch model
snake_case : int = RemBertConfig.from_json_file(lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(lowercase ) ) )
snake_case : Tuple = RemBertModel(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase ,lowercase ,lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(lowercase ) )
torch.save(model.state_dict() ,lowercase )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase : Dict = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 0 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=() ,lowercase=None ,lowercase="no" ,lowercase="29500" ) -> Tuple:
snake_case : Optional[Any] = False
snake_case : Union[str, Any] = False
if any(key.startswith("""KAGGLE""" ) for key in os.environ.keys() ):
snake_case : List[str] = True
elif "IPython" in sys.modules:
snake_case : int = """google.colab""" in str(sys.modules["""IPython"""].get_ipython() )
try:
snake_case : Union[str, Any] = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
f"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get("""TPU_NAME""" ,UpperCAmelCase__ ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside """
"""your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if num_processes is None:
snake_case : Dict = 8
snake_case : int = PrepareForLaunch(UpperCAmelCase__ ,distributed_type="""TPU""" )
print(f"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(UpperCAmelCase__ ,args=UpperCAmelCase__ ,nprocs=UpperCAmelCase__ ,start_method="""fork""" )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on one CPU.""" )
function(*UpperCAmelCase__ )
else:
if num_processes is None:
raise ValueError(
"""You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.""" )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
"""To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized """
"""inside your training function. Restart your notebook and make sure no cells initializes an """
"""`Accelerator`.""" )
if torch.cuda.is_initialized():
raise ValueError(
"""To launch a multi-GPU training from your notebook, you need to avoid running any instruction """
"""using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA """
"""function.""" )
            # torch.distributed will expect a few environment variables to be here. We set the ones common to each
            # process here (the other ones will be set by the launcher).
with patch_environment(
                world_size=UpperCAmelCase__ ,master_addr="""127.0.0.1""" ,master_port=UpperCAmelCase__ ,mixed_precision=UpperCAmelCase__ ):
snake_case : str = PrepareForLaunch(UpperCAmelCase__ ,distributed_type="""MULTI_GPU""" )
print(f"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(UpperCAmelCase__ ,args=UpperCAmelCase__ ,nprocs=UpperCAmelCase__ ,start_method="""fork""" )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
"""CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. """
"""This likely stems from an outside import causing issues once the `notebook_launcher()` is called. """
"""Please review your imports and test them when running the `notebook_launcher()` to identify """
"""which one is problematic.""" ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
snake_case : Optional[int] = """1"""
print("""Launching training on MPS.""" )
elif torch.cuda.is_available():
print("""Launching training on one GPU.""" )
else:
print("""Launching training on CPU.""" )
function(*UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase=() ,lowercase=2 ) -> Union[str, Any]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variables to be here. We set the ones common to each
        # process here (the other ones will be set by the launcher).
with patch_environment(
            world_size=UpperCAmelCase__ ,master_addr="""127.0.0.1""" ,master_port="""29500""" ,accelerate_mixed_precision="""no""" ,accelerate_debug_rdv_file=tmp_file.name ,accelerate_use_cpu="""yes""" ,):
snake_case : List[str] = PrepareForLaunch(UpperCAmelCase__ ,debug=UpperCAmelCase__ )
start_processes(UpperCAmelCase__ ,args=UpperCAmelCase__ ,nprocs=UpperCAmelCase__ ,start_method="""fork""" )
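# Usage sketch, assuming this mirrors `accelerate.debug_launcher`, which runs
# `function` on CPU subprocesses with a shared rendezvous debug file:
#
#   from accelerate import debug_launcher
#   debug_launcher(training_function, args=(), num_processes=2)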
| 714 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> Any:
if isinstance(UpperCAmelCase__ ,torch.Tensor ):
return image
elif isinstance(UpperCAmelCase__ ,PIL.Image.Image ):
snake_case : Optional[int] = [image]
if isinstance(image[0] ,PIL.Image.Image ):
snake_case : Union[str, Any] = [np.array(i.resize((w, h) ,resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
snake_case : Tuple = np.concatenate(UpperCAmelCase__ ,axis=0 )
snake_case : Tuple = np.array(UpperCAmelCase__ ).astype(np.floataa ) / 255.0
snake_case : Optional[Any] = image.transpose(0 ,3 ,1 ,2 )
snake_case : Tuple = 2.0 * image - 1.0
snake_case : Optional[Any] = torch.from_numpy(UpperCAmelCase__ )
elif isinstance(image[0] ,torch.Tensor ):
snake_case : Tuple = torch.cat(UpperCAmelCase__ ,dim=0 )
return image
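# Shape sanity-check sketch (hedged: `preprocess` is the helper above, under the
# name the pipeline later calls it by; the sizes are illustrative):
#
#   img = PIL.Image.new("RGB", (640, 480))
#   tensor = preprocess(img, 512, 512)
#   # -> torch.FloatTensor of shape (1, 3, 512, 512) with values in [-1, 1]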
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase=0.9995 ) -> List[str]:
if not isinstance(UpperCAmelCase__ ,np.ndarray ):
snake_case : Optional[Any] = True
snake_case : Dict = va.device
snake_case : str = va.cpu().numpy()
snake_case : Union[str, Any] = va.cpu().numpy()
snake_case : Optional[int] = np.sum(va * va / (np.linalg.norm(UpperCAmelCase__ ) * np.linalg.norm(UpperCAmelCase__ )) )
if np.abs(UpperCAmelCase__ ) > DOT_THRESHOLD:
snake_case : List[Any] = (1 - t) * va + t * va
else:
snake_case : str = np.arccos(UpperCAmelCase__ )
snake_case : Optional[int] = np.sin(UpperCAmelCase__ )
snake_case : Optional[Any] = theta_a * t
snake_case : List[Any] = np.sin(UpperCAmelCase__ )
snake_case : Optional[Any] = np.sin(theta_a - theta_t ) / sin_theta_a
snake_case : Optional[int] = sin_theta_t / sin_theta_a
snake_case : List[Any] = sa * va + sa * va
if inputs_are_torch:
snake_case : Union[str, Any] = torch.from_numpy(UpperCAmelCase__ ).to(UpperCAmelCase__ )
return va
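# Quick check sketch: `slerp(t, v0, v1)` (the name this helper is called by
# later in the pipeline) walks the great circle between the two vectors, so the
# midpoint of two orthogonal unit vectors lands at 45 degrees:
#
#   va = torch.tensor([1.0, 0.0])
#   vb = torch.tensor([0.0, 1.0])
#   slerp(0.5, va, vb)  # ~ tensor([0.7071, 0.7071])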
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> List[Any]:
snake_case : Union[str, Any] = F.normalize(UpperCAmelCase__ ,dim=-1 )
snake_case : Any = F.normalize(UpperCAmelCase__ ,dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
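# Sanity-check sketch for the loss above (`spherical_dist_loss` where the
# pipeline calls it): for orthogonal unit vectors the half-chord is sin(pi/4),
# so the loss is 2 * (pi/4)**2 = pi**2 / 8 ~ 1.2337:
#
#   x = torch.tensor([[1.0, 0.0]])
#   y = torch.tensor([[0.0, 1.0]])
#   spherical_dist_loss(x, y)  # ~ tensor([1.2337])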
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Dict:
for param in model.parameters():
snake_case : Dict = value
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> Dict:
super().__init__()
self.register_modules(
vae=__UpperCamelCase , text_encoder=__UpperCamelCase , clip_model=__UpperCamelCase , tokenizer=__UpperCamelCase , unet=__UpperCamelCase , scheduler=__UpperCamelCase , feature_extractor=__UpperCamelCase , coca_model=__UpperCamelCase , coca_tokenizer=__UpperCamelCase , coca_transform=__UpperCamelCase , )
snake_case : Dict = (
feature_extractor.size
if isinstance(feature_extractor.size , __UpperCamelCase )
else feature_extractor.size["""shortest_edge"""]
)
snake_case : int = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , __UpperCamelCase )
set_requires_grad(self.clip_model , __UpperCamelCase )
def UpperCAmelCase ( self , A = "auto" ) -> str:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__UpperCamelCase )
def UpperCAmelCase ( self ) -> Dict:
self.enable_attention_slicing(__UpperCamelCase )
def UpperCAmelCase ( self ) -> Union[str, Any]:
set_requires_grad(self.vae , __UpperCamelCase )
def UpperCAmelCase ( self ) -> Dict:
set_requires_grad(self.vae , __UpperCamelCase )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.unet , __UpperCamelCase )
def UpperCAmelCase ( self ) -> str:
set_requires_grad(self.unet , __UpperCamelCase )
def UpperCAmelCase ( self , A , A , A ) -> Union[str, Any]:
# get the original timestep using init_timestep
snake_case : Union[str, Any] = min(int(num_inference_steps * strength ) , __UpperCamelCase )
snake_case : Union[str, Any] = max(num_inference_steps - init_timestep , 0 )
snake_case : Tuple = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
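    # Worked example sketch: with num_inference_steps=50 and strength=0.6 the
    # method above keeps the last 30 scheduler timesteps (init_timestep = 30,
    # t_start = 50 - 30 = 20), i.e. the img2img run denoises for 30 steps.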
def UpperCAmelCase ( self , A , A , A , A , A , A=None ) -> Dict:
if not isinstance(__UpperCamelCase , torch.Tensor ):
raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(__UpperCamelCase )}""" )
snake_case : Optional[Any] = image.to(device=__UpperCamelCase , dtype=__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
snake_case : str = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__UpperCamelCase )
]
snake_case : Tuple = torch.cat(__UpperCamelCase , dim=0 )
else:
snake_case : List[str] = self.vae.encode(__UpperCamelCase ).latent_dist.sample(__UpperCamelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case : Any = 0.1_82_15 * init_latents
snake_case : Tuple = init_latents.repeat_interleave(__UpperCamelCase , dim=0 )
snake_case : Any = randn_tensor(init_latents.shape , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
# get latents
snake_case : Optional[int] = self.scheduler.add_noise(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case : int = init_latents
return latents
def UpperCAmelCase ( self , A ) -> List[str]:
snake_case : Tuple = self.coca_transform(__UpperCamelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
snake_case : str = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
snake_case : Optional[int] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split("""<end_of_text>""" )[0].replace("""<start_of_text>""" , """""" ).rstrip(""" .,""" )
def UpperCAmelCase ( self , A , A ) -> Optional[Any]:
snake_case : Dict = self.feature_extractor.preprocess(__UpperCamelCase )
snake_case : List[Any] = torch.from_numpy(clip_image_input["""pixel_values"""][0] ).unsqueeze(0 ).to(self.device ).half()
snake_case : Tuple = self.clip_model.get_image_features(__UpperCamelCase )
snake_case : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__UpperCamelCase )
snake_case : Any = image_embeddings_clip.repeat_interleave(__UpperCamelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCAmelCase ( self , A , A , A , A , A , A , A , ) -> Optional[Any]:
snake_case : List[Any] = latents.detach().requires_grad_()
snake_case : List[str] = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
snake_case : List[str] = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
snake_case : Optional[int] = self.scheduler.alphas_cumprod[timestep]
snake_case : Dict = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise, also called
            # "predicted x_0" in formula (12) of https://arxiv.org/pdf/2010.02502.pdf
snake_case : List[str] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
snake_case : Optional[Any] = torch.sqrt(__UpperCamelCase )
snake_case : Dict = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , __UpperCamelCase ):
snake_case : str = self.scheduler.sigmas[index]
snake_case : str = latents - sigma * noise_pred
else:
raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case : str = 1 / 0.1_82_15 * sample
snake_case : List[str] = self.vae.decode(__UpperCamelCase ).sample
snake_case : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
snake_case : Optional[Any] = transforms.Resize(self.feature_extractor_size )(__UpperCamelCase )
snake_case : int = self.normalize(__UpperCamelCase ).to(latents.dtype )
snake_case : Any = self.clip_model.get_image_features(__UpperCamelCase )
snake_case : Any = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=__UpperCamelCase )
snake_case : str = spherical_dist_loss(__UpperCamelCase , __UpperCamelCase ).mean() * clip_guidance_scale
snake_case : Any = -torch.autograd.grad(__UpperCamelCase , __UpperCamelCase )[0]
if isinstance(self.scheduler , __UpperCamelCase ):
snake_case : Dict = latents.detach() + grads * (sigma**2)
snake_case : Optional[Any] = noise_pred_original
else:
snake_case : str = noise_pred_original - torch.sqrt(__UpperCamelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , A , A , A = None , A = None , A = 5_1_2 , A = 5_1_2 , A = 0.6 , A = 5_0 , A = 7.5 , A = 1 , A = 0.0 , A = 1_0_0 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> int:
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(__UpperCamelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(__UpperCamelCase , torch.Generator ) and batch_size > 1:
snake_case : List[Any] = [generator] + [None] * (batch_size - 1)
snake_case : List[Any] = [
("""model""", self.coca_model is None),
("""tokenizer""", self.coca_tokenizer is None),
("""transform""", self.coca_transform is None),
]
snake_case : List[Any] = [x[0] for x in coca_is_none if x[1]]
snake_case : Union[str, Any] = """, """.join(__UpperCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__UpperCamelCase ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
snake_case : Tuple = self.get_image_description(__UpperCamelCase )
if style_prompt is None:
if len(__UpperCamelCase ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
snake_case : Any = self.get_image_description(__UpperCamelCase )
# get prompt text embeddings for content and style
snake_case : Union[str, Any] = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""pt""" , )
snake_case : Dict = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
snake_case : Dict = self.tokenizer(
__UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , truncation=__UpperCamelCase , return_tensors="""pt""" , )
snake_case : str = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
snake_case : str = slerp(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# duplicate text embeddings for each generation per prompt
snake_case : Dict = text_embeddings.repeat_interleave(__UpperCamelCase , dim=0 )
# set timesteps
snake_case : List[Any] = """offset""" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
snake_case : List[Any] = {}
if accepts_offset:
snake_case : Optional[Any] = 1
self.scheduler.set_timesteps(__UpperCamelCase , **__UpperCamelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
snake_case , snake_case : Dict = self.get_timesteps(__UpperCamelCase , __UpperCamelCase , self.device )
snake_case : Optional[int] = timesteps[:1].repeat(__UpperCamelCase )
# Preprocess image
snake_case : int = preprocess(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case : Union[str, Any] = self.prepare_latents(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text_embeddings.dtype , self.device , __UpperCamelCase )
snake_case : Tuple = preprocess(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case : Optional[Any] = self.prepare_latents(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text_embeddings.dtype , self.device , __UpperCamelCase )
snake_case : str = slerp(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if clip_guidance_scale > 0:
snake_case : Any = self.get_clip_image_embeddings(__UpperCamelCase , __UpperCamelCase )
snake_case : Optional[int] = self.get_clip_image_embeddings(__UpperCamelCase , __UpperCamelCase )
snake_case : List[str] = slerp(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
snake_case : Dict = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case : Any = content_text_input.input_ids.shape[-1]
snake_case : Optional[Any] = self.tokenizer([""""""] , padding="""max_length""" , max_length=__UpperCamelCase , return_tensors="""pt""" )
snake_case : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
snake_case : int = uncond_embeddings.repeat_interleave(__UpperCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case : Optional[int] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
        # However, this currently doesn't work in `mps`.
snake_case : Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
snake_case : str = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
snake_case : Any = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="""cpu""" , dtype=__UpperCamelCase ).to(
self.device )
else:
snake_case : str = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
snake_case : Dict = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case : List[str] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case : int = {}
if accepts_eta:
snake_case : Any = eta
# check if the scheduler accepts generator
snake_case : List[Any] = """generator""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
snake_case : List[str] = generator
with self.progress_bar(total=__UpperCamelCase ):
for i, t in enumerate(__UpperCamelCase ):
# expand the latents if we are doing classifier free guidance
snake_case : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : Union[str, Any] = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
snake_case : str = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
snake_case , snake_case : str = noise_pred.chunk(2 )
snake_case : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
snake_case : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
snake_case , snake_case : Optional[int] = self.cond_fn(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
snake_case : Optional[Any] = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
snake_case : Any = 1 / 0.1_82_15 * latents
snake_case : List[Any] = self.vae.decode(__UpperCamelCase ).sample
snake_case : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
snake_case : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
snake_case : Optional[int] = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
| 715 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase : List[str] = 3
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
print("""Generating primitive root of p""" )
while True:
snake_case : Optional[int] = random.randrange(3 ,lowercase )
if pow(lowercase ,2 ,lowercase ) == 1:
continue
if pow(lowercase ,lowercase ,lowercase ) == 1:
continue
return g
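# Illustrative check of the two rejection tests above, with small numbers
# (p = 11, candidate g = 2, which the loop would accept):
#
#   assert pow(2, 2, 11) != 1 and pow(2, 11, 11) != 1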
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
print("""Generating prime p...""" )
snake_case : Optional[int] = rabin_miller.generate_large_prime(lowercase ) # select large prime number.
snake_case : Optional[int] = primitive_root(lowercase ) # one primitive root on modulo p.
    snake_case : Optional[Any] = random.randrange(3 ,lowercase ) # private_key -> has to be greater than 2 for safety.
snake_case : Tuple = cryptomath.find_mod_inverse(pow(lowercase ,lowercase ,lowercase ) ,lowercase )
snake_case : str = (key_size, e_a, e_a, p)
snake_case : Optional[Any] = (key_size, d)
return public_key, private_key
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> None:
if os.path.exists(f"""{name}_pubkey.txt""" ) or os.path.exists(f"""{name}_privkey.txt""" ):
print("""\nWARNING:""" )
print(
f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
snake_case , snake_case : Optional[Any] = generate_key(lowercase )
print(f"""\nWriting public key to file {name}_pubkey.txt...""" )
with open(f"""{name}_pubkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" )
print(f"""Writing private key to file {name}_privkey.txt...""" )
with open(f"""{name}_privkey.txt""" ,"""w""" ) as fo:
fo.write(f"""{private_key[0]},{private_key[1]}""" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
print("""Making key files...""" )
make_key_files("""elgamal""" ,2048 )
print("""Key files generation successful""" )
if __name__ == "__main__":
main()
| 684 | 0 |
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = TypeVar('DatasetType', Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = None ,lowercase = None ,lowercase = None ,lowercase = None ,lowercase = "first_exhausted" ,) -> DatasetType:
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("""Unable to interleave an empty list of datasets.""" )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_UpperCamelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.""" )
if i == 0:
snake_case , snake_case : str = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase ,_UpperCamelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,info=_UpperCamelCase ,split=_UpperCamelCase ,stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase ,info=_UpperCamelCase ,split=_UpperCamelCase ,stopping_strategy=_UpperCamelCase )
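# Usage sketch, assuming the standard `datasets.interleave_datasets` API that
# this function implements:
#
#   from datasets import Dataset, interleave_datasets
#   d1 = Dataset.from_dict({"text": ["a", "b"]})
#   d2 = Dataset.from_dict({"text": ["c", "d"]})
#   mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)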
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase = None ,lowercase = None ,lowercase = 0 ,) -> DatasetType:
if not dsets:
raise ValueError("""Unable to concatenate an empty list of datasets.""" )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase ,(Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase ,(DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
"""is an empty dataset dictionary.""" )
raise ValueError(
f"""Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n"""
f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(_UpperCamelCase ) )}']""" )
raise ValueError(
f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.""" )
if i == 0:
snake_case , snake_case : List[str] = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase ,_UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase ,_UpperCamelCase ):
raise ValueError(
f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase ,info=_UpperCamelCase ,split=_UpperCamelCase ,axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase ,info=_UpperCamelCase ,split=_UpperCamelCase ,axis=_UpperCamelCase )
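# Usage sketch, assuming the standard `datasets.concatenate_datasets` API:
#
#   from datasets import Dataset, concatenate_datasets
#   d1 = Dataset.from_dict({"text": ["a", "b"]})
#   d2 = Dataset.from_dict({"text": ["c", "d"]})
#   combined = concatenate_datasets([d1, d2])  # 4 rows with the same features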
| 716 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> int:
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case : Dict = _modexpt(lowercase ,exponent // 2 ,lowercase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(lowercase ,exponent - 1 ,lowercase )) % modulo_value
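# Quick check sketch: the square-and-multiply recursion above (called
# `_modexpt` internally) agrees with Python's three-argument pow:
#
#   assert _modexpt(3, 13, 7) == pow(3, 13, 7) == 3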
def SCREAMING_SNAKE_CASE__ ( lowercase = 1777 ,lowercase = 1855 ,lowercase = 8 ) -> int:
snake_case : int = base
for _ in range(1 ,lowercase ):
snake_case : List[str] = _modexpt(lowercase ,lowercase ,10**digits )
return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Dict:
snake_case : List[Any] = old_name
if "patch_embed" in old_name:
snake_case , snake_case , snake_case : int = old_name.split(""".""" )
if layer == "0":
snake_case : Dict = old_name.replace("""0""" ,"""convolution1""" )
elif layer == "1":
snake_case : Optional[Any] = old_name.replace("""1""" ,"""batchnorm_before""" )
elif layer == "3":
snake_case : Dict = old_name.replace("""3""" ,"""convolution2""" )
else:
snake_case : int = old_name.replace("""4""" ,"""batchnorm_after""" )
if "network" in old_name and re.search(R"""\d\.\d""" ,lowercase ):
snake_case : str = R"""\b\d{2}\b"""
if bool(re.search(lowercase ,lowercase ) ):
snake_case : Optional[Any] = re.search(R"""\d\.\d\d.""" ,lowercase ).group()
else:
snake_case : str = re.search(R"""\d\.\d.""" ,lowercase ).group()
if int(match[0] ) < 6:
snake_case : List[str] = old_name.replace(lowercase ,"""""" )
snake_case : Any = trimmed_name.replace("""network""" ,match[0] + """.meta4D_layers.blocks.""" + match[2:-1] )
snake_case : int = """intermediate_stages.""" + trimmed_name
else:
snake_case : Any = old_name.replace(lowercase ,"""""" )
if int(match[2] ) < num_meta4D_last_stage:
snake_case : int = trimmed_name.replace("""network""" ,"""meta4D_layers.blocks.""" + match[2] )
else:
snake_case : Tuple = str(int(match[2] ) - num_meta4D_last_stage )
snake_case : Optional[int] = trimmed_name.replace("""network""" ,"""meta3D_layers.blocks.""" + layer_index )
if "norm1" in old_name:
snake_case : str = trimmed_name.replace("""norm1""" ,"""layernorm1""" )
elif "norm2" in old_name:
snake_case : Optional[Any] = trimmed_name.replace("""norm2""" ,"""layernorm2""" )
elif "fc1" in old_name:
snake_case : Optional[Any] = trimmed_name.replace("""fc1""" ,"""linear_in""" )
elif "fc2" in old_name:
snake_case : Any = trimmed_name.replace("""fc2""" ,"""linear_out""" )
snake_case : Optional[int] = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(R""".\d.""" ,lowercase ):
snake_case : List[str] = old_name.replace("""network""" ,"""intermediate_stages""" )
if "fc" in new_name:
snake_case : Dict = new_name.replace("""fc""" ,"""convolution""" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
snake_case : Optional[Any] = new_name.replace("""norm1""" ,"""batchnorm_before""" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
snake_case : Tuple = new_name.replace("""norm2""" ,"""batchnorm_after""" )
if "proj" in new_name:
snake_case : Dict = new_name.replace("""proj""" ,"""projection""" )
if "dist_head" in new_name:
snake_case : Dict = new_name.replace("""dist_head""" ,"""distillation_classifier""" )
elif "head" in new_name:
snake_case : Tuple = new_name.replace("""head""" ,"""classifier""" )
elif "patch_embed" in new_name:
snake_case : Tuple = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
snake_case : Any = new_name.replace("""norm""" ,"""layernorm""" )
snake_case : Optional[Any] = """efficientformer.""" + new_name
else:
snake_case : Optional[Any] = """efficientformer.encoder.""" + new_name
return new_name
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> Tuple:
for key in checkpoint.copy().keys():
snake_case : Optional[Any] = checkpoint.pop(lowercase )
snake_case : Tuple = val
return checkpoint
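# Illustrative rename sketch (hedged: the key and the stage count are made up;
# the helpers are the two functions above, named `rename_key` and
# `convert_torch_checkpoint` in the original script):
#
#   ckpt = {"patch_embed.0.weight": torch.zeros(3)}
#   convert_torch_checkpoint(ckpt, 5)
#   # -> {"efficientformer.patch_embed.convolution1.weight": tensor([0., 0., 0.])}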
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
snake_case : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : str = Image.open(requests.get(lowercase ,stream=lowercase ).raw )
return image
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ,lowercase ) -> Union[str, Any]:
snake_case : List[str] = torch.load(lowercase ,map_location="""cpu""" )["""model"""]
snake_case : Optional[int] = EfficientFormerConfig.from_json_file(lowercase )
snake_case : Optional[Any] = EfficientFormerForImageClassificationWithTeacher(lowercase )
snake_case : Any = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] )
snake_case : List[Any] = config.depths[-1] - config.num_metaad_blocks + 1
snake_case : Any = convert_torch_checkpoint(lowercase ,lowercase )
model.load_state_dict(lowercase )
model.eval()
snake_case : Dict = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
# prepare image
snake_case : str = prepare_img()
snake_case : Optional[int] = 256
snake_case : str = 224
snake_case : Optional[int] = EfficientFormerImageProcessor(
size={"""shortest_edge""": image_size} ,crop_size={"""height""": crop_size, """width""": crop_size} ,resample=pillow_resamplings["""bicubic"""] ,)
snake_case : Optional[int] = processor(images=lowercase ,return_tensors="""pt""" ).pixel_values
# original processing pipeline
snake_case : List[Any] = Compose(
[
Resize(lowercase ,interpolation=pillow_resamplings["""bicubic"""] ),
CenterCrop(lowercase ),
ToTensor(),
Normalize(lowercase ,lowercase ),
] )
snake_case : Union[str, Any] = image_transforms(lowercase ).unsqueeze(0 )
assert torch.allclose(lowercase ,lowercase )
snake_case : int = model(lowercase )
snake_case : Optional[int] = outputs.logits
snake_case : Dict = (1, 1000)
if "l1" in model_name:
snake_case : Union[str, Any] = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] ,lowercase ,atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
snake_case : Any = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] ,lowercase ,atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
snake_case : Any = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(lowercase ).mkdir(exist_ok=lowercase )
model.save_pretrained(lowercase )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(lowercase )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("""Pushing model to the hub...""" )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" ,commit_message="""Add model""" ,use_temp_dir=lowercase ,)
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" ,commit_message="""Add image processor""" ,use_temp_dir=lowercase ,)
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
lowerCamelCase : int = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 717 |
from itertools import product
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = sides_number
snake_case : List[str] = max_face_number * dice_number
snake_case : Any = [0] * (max_total + 1)
snake_case : int = 1
snake_case : List[str] = range(lowercase ,max_face_number + 1 )
for dice_numbers in product(lowercase ,repeat=lowercase ):
snake_case : Any = sum(lowercase )
totals_frequencies[total] += 1
return totals_frequencies
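# Quick check sketch: two six-sided dice give 36 equally likely outcomes, and
# a total of 7 can be rolled in 6 ways:
#
#   freqs = total_frequency_distribution(sides_number=6, dice_number=2)
#   assert sum(freqs) == 36 and freqs[7] == 6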
def SCREAMING_SNAKE_CASE__ ( ) -> float:
snake_case : List[str] = total_frequency_distribution(
sides_number=4 ,dice_number=9 )
snake_case : str = total_frequency_distribution(
sides_number=6 ,dice_number=6 )
snake_case : Optional[int] = 0
snake_case : List[str] = 9
snake_case : Union[str, Any] = 4 * 9
snake_case : Dict = 6
for peter_total in range(lowercase ,max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
snake_case : str = (4**9) * (6**6)
snake_case : int = peter_wins_count / total_games_number
snake_case : Optional[int] = round(lowercase ,ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=2 , A=True , A=False , A=1_0 , A=3 , A=3_2 * 4 , A=3_2 * 6 , A=4 , A=3_2 , ) -> str:
snake_case : List[Any] = parent
snake_case : Union[str, Any] = batch_size
snake_case : Dict = is_training
snake_case : Optional[int] = use_auxiliary_loss
snake_case : Optional[Any] = num_queries
snake_case : Dict = num_channels
snake_case : Tuple = min_size
snake_case : Tuple = max_size
snake_case : List[Any] = num_labels
snake_case : List[Any] = mask_feature_size
def UpperCAmelCase ( self ) -> List[str]:
snake_case : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
snake_case : Any = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
snake_case : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
snake_case : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
snake_case : Tuple = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase ( self ) -> List[str]:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCAmelCase ( self ) -> int:
snake_case : Union[str, Any] = self.prepare_config_and_inputs()
snake_case : Optional[int] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def UpperCAmelCase ( self , A , A ) -> int:
snake_case : List[str] = output.encoder_hidden_states
snake_case : str = output.pixel_decoder_hidden_states
snake_case : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_config.decoder_layers )
def UpperCAmelCase ( self , A , A , A , A=False ) -> Tuple:
with torch.no_grad():
snake_case : List[Any] = MaskFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
snake_case : List[str] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
snake_case : str = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase ( self , A , A , A , A , A ) -> Dict:
snake_case : List[Any] = MaskFormerForInstanceSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(A ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
snake_case : Any = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
snake_case : Any = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class __lowercase (__lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
_snake_case = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_snake_case = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def UpperCAmelCase ( self ) -> Any:
snake_case : Optional[int] = MaskFormerModelTester(self )
snake_case : str = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def UpperCAmelCase ( self ) -> Tuple:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> str:
snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
def UpperCAmelCase ( self ) -> List[str]:
pass
@unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
def UpperCAmelCase ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="""MaskFormer is not a generative model""" )
def UpperCAmelCase ( self ) -> Dict:
pass
@unittest.skip(reason="""MaskFormer does not use token embeddings""" )
def UpperCAmelCase ( self ) -> Optional[int]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" )
def UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase ( self ) -> str:
pass
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Dict = model_class(lowerCamelCase__ )
snake_case : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Any = [*signature.parameters.keys()]
snake_case : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def UpperCAmelCase ( self ) -> Dict:
for model_name in ["facebook/maskformer-swin-small-coco"]:
snake_case : Tuple = MaskFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Any = (self.model_tester.min_size,) * 2
snake_case : List[str] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'''mask_labels''': torch.randn((2, 1_0, *size) , device=lowerCamelCase__ ),
'''class_labels''': torch.zeros(2 , 1_0 , device=lowerCamelCase__ ).long(),
}
snake_case : List[Any] = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCamelCase__ )
snake_case : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : Optional[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
snake_case : Any = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase ( self ) -> Union[str, Any]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
snake_case : List[str] = self.all_model_classes[1]
snake_case : Any = self.model_tester.prepare_config_and_inputs()
snake_case : Union[str, Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
snake_case : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def UpperCAmelCase ( self ) -> Dict:
snake_case : Optional[int] = self.all_model_classes[1]
snake_case : Any = self.model_tester.prepare_config_and_inputs()
snake_case : Any = True
snake_case : int = True
snake_case : List[Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
snake_case : Union[str, Any] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
snake_case : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
snake_case : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case : Dict = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase : Optional[int] = 1e-4
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
snake_case : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase ( self ) -> Union[str, Any]:
return (
MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
if is_vision_available()
else None
)
def UpperCAmelCase ( self ) -> Tuple:
snake_case : str = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(lowerCamelCase__ )
snake_case : List[str] = self.default_image_processor
snake_case : List[str] = prepare_img()
snake_case : Any = image_processor(lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
snake_case : Union[str, Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
snake_case : Optional[Any] = model(**lowerCamelCase__ )
snake_case : Dict = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
snake_case : Union[str, Any] = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
snake_case : Optional[Any] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def UpperCAmelCase ( self ) -> Any:
snake_case : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(lowerCamelCase__ )
.eval()
)
snake_case : Union[str, Any] = self.default_image_processor
snake_case : Optional[int] = prepare_img()
snake_case : Dict = image_processor(lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
snake_case : int = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
snake_case : Tuple = model(**lowerCamelCase__ )
# masks_queries_logits
snake_case : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case : Union[str, Any] = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
snake_case : Any = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
snake_case : List[str] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case : Optional[int] = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : int = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
.to(lowerCamelCase__ )
.eval()
)
snake_case : Tuple = self.default_image_processor
snake_case : Any = prepare_img()
snake_case : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
snake_case : Optional[int] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 8_0_0, 1_0_8_8) )
with torch.no_grad():
snake_case : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
snake_case : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case : Optional[int] = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
snake_case : Any = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
snake_case : str = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case : List[str] = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : Any = (
MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
.to(lowerCamelCase__ )
.eval()
)
snake_case : Dict = self.default_image_processor
snake_case : Optional[Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="""pt""" , )
snake_case : int = inputs['''pixel_values'''].to(lowerCamelCase__ )
snake_case : Tuple = [el.to(lowerCamelCase__ ) for el in inputs['''mask_labels''']]
snake_case : Optional[int] = [el.to(lowerCamelCase__ ) for el in inputs['''class_labels''']]
with torch.no_grad():
snake_case : Optional[int] = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None ) | 718 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase : Optional[int] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
lowerCamelCase : Any = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowerCamelCase : Union[str, Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> Tuple:
with open(snake_case_ ,"""rb""" ) as f:
snake_case : List[Any] = Image.open(snake_case_ )
return im.convert("""RGB""" )
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = field(
default=UpperCamelCase_ , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """A folder containing the training data."""} )
_snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """A folder containing the validation data."""} )
_snake_case = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
_snake_case = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCAmelCase ( self ) -> Tuple:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class __lowercase :
"""simple docstring"""
_snake_case = field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCamelCase_ )} , )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
_snake_case = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case = field(default=UpperCamelCase_ , metadata={"""help""": """Name or path of preprocessor config."""} )
_snake_case = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
_snake_case = field(
default=UpperCamelCase_ , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> List[str]:
snake_case : List[str] = torch.stack([example["""pixel_values"""] for example in examples] )
snake_case : List[Any] = torch.tensor([example["""labels"""] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""" ,snake_case_ ,snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,handlers=[logging.StreamHandler(sys.stdout )] ,)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case : Tuple = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bit training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
snake_case : int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case : int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
snake_case : List[str] = load_dataset(
data_args.dataset_name ,data_args.dataset_config_name ,cache_dir=model_args.cache_dir ,task="""image-classification""" ,use_auth_token=True if model_args.use_auth_token else None ,)
else:
snake_case : Any = {}
if data_args.train_dir is not None:
snake_case : str = os.path.join(data_args.train_dir ,"""**""" )
if data_args.validation_dir is not None:
snake_case : Tuple = os.path.join(data_args.validation_dir ,"""**""" )
snake_case : int = load_dataset(
"""imagefolder""" ,data_files=snake_case_ ,cache_dir=model_args.cache_dir ,task="""image-classification""" ,)
# If we don't have a validation split, split off a percentage of train as validation.
snake_case : Tuple = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split ,snake_case_ ) and data_args.train_val_split > 0.0:
snake_case : Optional[int] = dataset['''train'''].train_test_split(data_args.train_val_split )
snake_case : List[Any] = split['''train''']
snake_case : List[Any] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
snake_case : int = dataset['''train'''].features['''labels'''].names
    snake_case , snake_case : List[str] = {}, {}
for i, label in enumerate(snake_case_ ):
snake_case : Optional[Any] = str(snake_case_ )
snake_case : Optional[int] = label
# Load the accuracy metric from the datasets package
snake_case : List[Any] = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase ):
        return metric.compute(predictions=np.argmax(lowercase.predictions ,axis=1 ) ,references=lowercase.label_ids )
snake_case : int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path ,num_labels=len(snake_case_ ) ,labelaid=snake_case_ ,idalabel=snake_case_ ,finetuning_task="""image-classification""" ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
snake_case : int = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path ,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) ,config=snake_case_ ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes ,)
snake_case : Optional[int] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path ,cache_dir=model_args.cache_dir ,revision=model_args.model_revision ,use_auth_token=True if model_args.use_auth_token else None ,)
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
snake_case : Optional[Any] = image_processor.size['''shortest_edge''']
else:
snake_case : Optional[Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
snake_case : int = Normalize(mean=image_processor.image_mean ,std=image_processor.image_std )
snake_case : Tuple = Compose(
[
RandomResizedCrop(snake_case_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
snake_case : Union[str, Any] = Compose(
[
Resize(snake_case_ ),
CenterCrop(snake_case_ ),
ToTensor(),
normalize,
] )
def train_transforms(lowercase ):
snake_case : Any = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(lowercase ):
snake_case : int = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch['''image''']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
snake_case : Optional[Any] = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(snake_case_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
snake_case : List[str] = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(snake_case_ )
    # Initialize our trainer
snake_case : Optional[Any] = Trainer(
model=snake_case_ ,args=snake_case_ ,train_dataset=dataset["""train"""] if training_args.do_train else None ,eval_dataset=dataset["""validation"""] if training_args.do_eval else None ,compute_metrics=snake_case_ ,tokenizer=snake_case_ ,data_collator=snake_case_ ,)
# Training
if training_args.do_train:
snake_case : List[str] = None
if training_args.resume_from_checkpoint is not None:
snake_case : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case : List[Any] = last_checkpoint
snake_case : str = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
trainer.log_metrics("""train""" ,train_result.metrics )
trainer.save_metrics("""train""" ,train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case : Optional[int] = trainer.evaluate()
trainer.log_metrics("""eval""" ,snake_case_ )
trainer.save_metrics("""eval""" ,snake_case_ )
# Write model card and (optionally) push to hub
snake_case : List[Any] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
if __name__ == "__main__":
main()
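# A hedged usage sketch — the script is driven entirely by HfArgumentParser flags, e.g.
# (the dataset name and paths here are illustrative, not mandated by the script):
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./image-classification-out \
#       --do_train --do_eval \
#       --per_device_train_batch_size 8 \
#       --num_train_epochs 3
#
# Passing a single path to a .json file instead makes the parser take the
# parse_json_file branch near the top of main().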
| 719 |
import os
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
snake_case : Tuple = [] # noqa: E741
for _ in range(20 ):
l.append([int(lowercase ) for x in f.readline().split()] )
snake_case : Optional[Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
snake_case : List[Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
snake_case : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
snake_case : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
snake_case : str = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
snake_case : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
snake_case : int = temp
# diagonal 2
for i in range(17 ):
for j in range(3 ,20 ):
snake_case : Any = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
snake_case : Any = temp
return maximum
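# A hedged alternative sketch of the same four scans using direction vectors; `grid`
# is assumed to be the 20x20 list of ints that solution() reads from grid.txt.
def max_product_directions(grid: list[list[int]]) -> int:
    best = 0
    # right, down, main diagonal, anti-diagonal — the same four cases as above
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(20):
            for j in range(20):
                # keep only runs of 4 that stay inside the grid
                if 0 <= i + 3 * di < 20 and 0 <= j + 3 * dj < 20:
                    product = 1
                    for k in range(4):
                        product *= grid[i + k * di][j + k * dj]
                    best = max(best, product)
    return best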
if __name__ == "__main__":
print(solution())
| 684 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 720 |
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> list:
for i in range(len(lowercase ) - 1 ,0 ,-1 ):
snake_case : Any = False
        for j in range(i ,0 ,-1 ):
            if unsorted[j] < unsorted[j - 1]:
                snake_case , snake_case : Optional[Any] = unsorted[j - 1], unsorted[j]
                snake_case : Dict = True
        for j in range(i ):
if unsorted[j] > unsorted[j + 1]:
snake_case , snake_case : Dict = unsorted[j + 1], unsorted[j]
snake_case : Tuple = True
if not swapped:
break
return unsorted
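# The function above is stored with placeholder assignment targets (snake_case), so it
# cannot run as written. A minimal self-contained reference sketch of the same
# bidirectional bubble-sort pass, runnable on its own:
def _cocktail_shaker_reference(data: list) -> list:
    data = list(data)  # sort a copy
    for i in range(len(data) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):  # backward pass: sink small items to the left
            if data[j] < data[j - 1]:
                data[j], data[j - 1] = data[j - 1], data[j]
                swapped = True
        for j in range(i):  # forward pass: bubble large items to the right
            if data[j] > data[j + 1]:
                data[j], data[j + 1] = data[j + 1], data[j]
                swapped = True
        if not swapped:  # no swaps in either direction: already sorted
            break
    return data

assert _cocktail_shaker_reference([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]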
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase : Any = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 0 |
from __future__ import annotations
import pandas as pd
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> list[int]:
snake_case : Tuple = [0] * no_of_processes
snake_case : List[str] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(lowerCamelCase__ ):
snake_case : str = burst_time[i]
snake_case : Union[str, Any] = 0
snake_case : Union[str, Any] = 0
snake_case : Optional[Any] = 999999999
snake_case : Union[str, Any] = 0
snake_case : Tuple = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(lowerCamelCase__ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
snake_case : Optional[int] = remaining_time[j]
snake_case : Dict = j
snake_case : Tuple = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
snake_case : Any = remaining_time[short]
if minm == 0:
snake_case : List[Any] = 999999999
if remaining_time[short] == 0:
complete += 1
snake_case : List[str] = False
# Find finish time of current process
snake_case : int = increment_time + 1
            # Calculate waiting time: (finish time - arrival time) gives the turnaround,
            # and subtracting the burst time from that leaves the waiting time
            snake_case : int = finish_time - arrival_time[short]
            snake_case : Union[str, Any] = finar - burst_time[short]
if waiting_time[short] < 0:
snake_case : Union[str, Any] = 0
# Increment time
increment_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> list[int]:
snake_case : Optional[Any] = [0] * no_of_processes
for i in range(lowerCamelCase__ ):
snake_case : List[str] = burst_time[i] + waiting_time[i]
return turn_around_time
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ,lowercase ) -> None:
snake_case : str = 0
snake_case : Tuple = 0
for i in range(lowerCamelCase__ ):
snake_case : Union[str, Any] = total_waiting_time + waiting_time[i]
snake_case : Optional[Any] = total_turn_around_time + turn_around_time[i]
print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print("""Average turn around time =""" ,total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    print('Enter how many processes you want to analyze')
lowerCamelCase : List[Any] = int(input())
lowerCamelCase : List[str] = [0] * no_of_processes
lowerCamelCase : List[Any] = [0] * no_of_processes
lowerCamelCase : Any = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
    print('Enter the arrival time and burst time for process ' + str(i + 1))
lowerCamelCase , lowerCamelCase : Optional[Any] = map(int, input().split())
lowerCamelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase : List[Any] = burst_time
lowerCamelCase : int = no_of_processes
lowerCamelCase : str = waiting_time
lowerCamelCase : int = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
lowerCamelCase : List[str] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
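# A small hand-worked check of the SRTF logic above (not executed here, since the
# __main__ block reads interactively from stdin): with arrival times [0, 1, 2] and
# burst times [3, 1, 2], the preemptive schedule is P1 (t=0-1), P2 (t=1-2, completes),
# P1 (t=2-4, completes), P3 (t=4-6, completes). That gives waiting times [1, 0, 2] and
# turnaround times [4, 1, 4], so the printed averages would be 1.0 and 3.0.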
| 721 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2 the vocabulary had n_vocab=80 characters; v3 dropped the '+' character, so n_vocab=79.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
        # merge the three sub-vocabularies; dict() cannot take three positional mappings
        return {**self.artists_encoder , **self.genres_encoder , **self.lyrics_encoder}
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
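# A self-contained sketch of the _normalize rule above: lowercase the name, keep only
# [a-z0-9.], map every other character to '_', collapse runs of '_' and strip the edges.
# The example names are illustrative, not taken from the real Jukebox vocab files.
import re as _re

def _normalize_name(text: str) -> str:
    accepted = set("abcdefghijklmnopqrstuvwxyz0123456789.")
    text = "".join(c if c in accepted else "_" for c in text.lower())
    return _re.sub(r"_+", "_", text).strip("_")

assert _normalize_name("The Beatles!") == "the_beatles"
assert _normalize_name("AC/DC") == "ac_dc"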
| 684 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = 1
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : int = []
snake_case : List[str] = []
for i in range(self.num_layers ):
snake_case : int = self.in_channels if i == 0 else self.out_channels
snake_case : str = FlaxResnetBlockaD(
in_channels=A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : Any = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(A )
snake_case : Tuple = resnets
snake_case : Tuple = attentions
if self.add_downsample:
snake_case : List[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A , A , A , A=True ) -> List[Any]:
snake_case : int = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case : Union[str, Any] = resnet(A , A , deterministic=A )
snake_case : Tuple = attn(A , A , deterministic=A )
output_states += (hidden_states,)
if self.add_downsample:
snake_case : int = self.downsamplers_a(A )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = True
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> int:
snake_case : Optional[Any] = []
for i in range(self.num_layers ):
snake_case : List[Any] = self.in_channels if i == 0 else self.out_channels
snake_case : Union[str, Any] = FlaxResnetBlockaD(
in_channels=A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : List[Any] = resnets
if self.add_downsample:
snake_case : List[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A , A , A=True ) -> Any:
snake_case : List[Any] = ()
for resnet in self.resnets:
snake_case : Any = resnet(A , A , deterministic=A )
output_states += (hidden_states,)
if self.add_downsample:
snake_case : Dict = self.downsamplers_a(A )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = 1
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> Any:
snake_case : int = []
snake_case : List[Any] = []
for i in range(self.num_layers ):
snake_case : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case : Tuple = self.prev_output_channel if i == 0 else self.out_channels
snake_case : List[str] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : List[Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(A )
snake_case : int = resnets
snake_case : int = attentions
if self.add_upsample:
snake_case : Optional[int] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A , A , A , A , A=True ) -> int:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case : Dict = res_hidden_states_tuple[-1]
snake_case : Optional[Any] = res_hidden_states_tuple[:-1]
snake_case : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case : Dict = resnet(A , A , deterministic=A )
snake_case : Union[str, Any] = attn(A , A , deterministic=A )
if self.add_upsample:
snake_case : Dict = self.upsamplers_a(A )
return hidden_states
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 42
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = True
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Optional[Any] = []
for i in range(self.num_layers ):
snake_case : Optional[int] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case : Tuple = self.prev_output_channel if i == 0 else self.out_channels
snake_case : Union[str, Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : int = resnets
if self.add_upsample:
snake_case : Any = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A , A , A , A=True ) -> str:
for resnet in self.resnets:
# pop res hidden states
snake_case : List[Any] = res_hidden_states_tuple[-1]
snake_case : Optional[int] = res_hidden_states_tuple[:-1]
snake_case : List[str] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case : Union[str, Any] = resnet(A , A , deterministic=A )
if self.add_upsample:
snake_case : Any = self.upsamplers_a(A )
return hidden_states
class __lowercase (nn.Module ):
"""simple docstring"""
_snake_case = 42
_snake_case = 0.0
_snake_case = 1
_snake_case = 1
_snake_case = False
_snake_case = False
_snake_case = jnp.floataa
def UpperCAmelCase ( self ) -> Union[str, Any]:
# there is always at least one resnet
snake_case : Tuple = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
snake_case : Dict = []
for _ in range(self.num_layers ):
snake_case : str = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(A )
snake_case : str = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A )
snake_case : Optional[Any] = resnets
snake_case : str = attentions
def __call__( self , A , A , A , A=True ) -> Dict:
snake_case : Union[str, Any] = self.resnets[0](A , A )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
snake_case : Any = attn(A , A , deterministic=A )
snake_case : Dict = resnet(A , A , deterministic=A )
return hidden_states
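# A hedged, self-contained sketch of the flax.linen pattern the blocks above rely on
# (fields as dataclass attributes, parameters created lazily by init(), then threaded
# explicitly through apply()). _TinyBlock is illustrative only, not a diffusers module.
import jax

class _TinyBlock(nn.Module):
    features: int = 4

    @nn.compact
    def __call__(self, x):
        return nn.Dense(self.features)(x)

_params = _TinyBlock().init(jax.random.PRNGKey(0), jnp.ones((1, 8)))
_out = _TinyBlock().apply(_params, jnp.ones((1, 8)))  # shape (1, 4)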
| 700 |
def SCREAMING_SNAKE_CASE__ ( lowercase ,lowercase ) -> list:
snake_case : str = len(lowercase )
snake_case : Tuple = []
for i in range(len(lowercase ) - pat_len + 1 ):
snake_case : str = True
for j in range(lowercase ):
if s[i + j] != pattern[j]:
snake_case : Dict = False
break
if match_found:
position.append(lowercase )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
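# Note: the scan above is the classic naive matcher, worst case O(len(s) * len(pattern))
# character comparisons; overlapping occurrences are all reported, e.g. searching 'AA'
# in 'AAAA' yields [0, 1, 2] because the windows starting at 0, 1 and 2 all match.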
| 684 | 0 |
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
lowerCamelCase = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , *A , **A ) -> List[Any]:
super().__init__(*A , **A )
requires_backends(self , """decord""" )
self.check_model_type(A )
def UpperCAmelCase ( self , A=None , A=None , A=None ) -> str:
snake_case : List[str] = {}
if frame_sampling_rate is not None:
snake_case : str = frame_sampling_rate
if num_frames is not None:
snake_case : str = num_frames
snake_case : Optional[Any] = {}
if top_k is not None:
snake_case : int = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , A , **A ) -> str:
return super().__call__(A , **A )
def UpperCAmelCase ( self , A , A=None , A=1 ) -> Tuple:
if num_frames is None:
snake_case : Optional[int] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
snake_case : int = BytesIO(requests.get(A ).content )
snake_case : Optional[int] = VideoReader(A )
videoreader.seek(0 )
snake_case : Union[str, Any] = 0
snake_case : str = num_frames * frame_sampling_rate - 1
snake_case : Dict = np.linspace(A , A , num=A , dtype=np.intaa )
snake_case : Tuple = videoreader.get_batch(A ).asnumpy()
snake_case : Union[str, Any] = list(A )
snake_case : Tuple = self.image_processor(A , return_tensors=self.framework )
return model_inputs
def UpperCAmelCase ( self , A ) -> Union[str, Any]:
snake_case : int = self.model(**A )
return model_outputs
def UpperCAmelCase ( self , A , A=5 ) -> Optional[int]:
if top_k > self.model.config.num_labels:
snake_case : Any = self.model.config.num_labels
if self.framework == "pt":
snake_case : Tuple = model_outputs.logits.softmax(-1 )[0]
snake_case : Optional[Any] = probs.topk(A )
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
snake_case : Any = scores.tolist()
snake_case : List[str] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(A , A )]
| 701 |
import numpy as np
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
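# Sanity sketch: the expression above is the logistic-form identity
# tanh(x) = 2 / (1 + e^(-2x)) - 1, so it must agree with np.tanh to float precision.
_x = np.linspace(-3, 3, 7)
assert np.allclose((2 / (1 + np.exp(-2 * _x))) - 1, np.tanh(_x))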
| 684 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
def __init__( self , A , A , A , A , A , A , A , A , A , ) -> Any:
super().__init__()
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=A , speech_processor=A , vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , )
def UpperCAmelCase ( self , A = "auto" ) -> Dict:
if slice_size == "auto":
snake_case : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase ( self ) -> int:
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self , A , A=1_6_0_0_0 , A = 5_1_2 , A = 5_1_2 , A = 5_0 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , **A , ) -> Tuple:
snake_case : List[Any] = self.speech_processor.feature_extractor(
A , return_tensors="""pt""" , sampling_rate=A ).input_features.to(self.device )
snake_case : str = self.speech_model.generate(A , max_length=4_8_0_0_0_0 )
snake_case : int = self.speech_processor.tokenizer.batch_decode(A , skip_special_tokens=A , normalize=A )[
0
]
if isinstance(A , A ):
snake_case : Tuple = 1
elif isinstance(A , A ):
snake_case : Union[str, Any] = len(A )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(A )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(A )}.""" )
# get prompt text embeddings
snake_case : Dict = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
snake_case : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case : str = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
snake_case : int = text_input_ids[:, : self.tokenizer.model_max_length]
snake_case : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case : Dict = text_embeddings.shape
snake_case : List[str] = text_embeddings.repeat(1 , A , 1 )
snake_case : Optional[int] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case : List[Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case : List[str]
if negative_prompt is None:
snake_case : Dict = [""""""] * batch_size
elif type(A ) is not type(A ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(A )} !="""
f""" {type(A )}.""" )
elif isinstance(A , A ):
snake_case : Optional[int] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
snake_case : Dict = negative_prompt
snake_case : Union[str, Any] = text_input_ids.shape[-1]
snake_case : Any = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
snake_case : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case : int = uncond_embeddings.shape[1]
snake_case : Tuple = uncond_embeddings.repeat(1 , A , 1 )
snake_case : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case : Dict = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case : int = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case : str = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
snake_case : Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
snake_case : Tuple = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case : int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case : str = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case : Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case : int = {}
if accepts_eta:
snake_case : Any = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case : List[Any] = self.scheduler.scale_model_input(A , A )
# predict the noise residual
snake_case : List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case : str = noise_pred.chunk(2 )
snake_case : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case : Optional[Any] = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
snake_case : List[str] = 1 / 0.1_82_15 * latents
snake_case : int = self.vae.decode(A ).sample
snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
snake_case : Union[str, Any] = self.numpy_to_pil(A )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
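# A hedged numeric sketch of the classifier-free guidance step used in __call__ above,
# noise = uncond + scale * (text - uncond): with guidance_scale 7.5 the text direction
# is amplified rather than averaged, e.g. 0.2 + 7.5 * (0.3 - 0.2) = 0.95.
assert torch.isclose(
    torch.tensor(0.2) + 7.5 * (torch.tensor(0.3) - torch.tensor(0.2)), torch.tensor(0.95)
)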
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase : Tuple = {'configuration_vit_mae': ['VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMAEConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : int = [
'VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMAEForPreTraining',
'ViTMAELayer',
'ViTMAEModel',
'ViTMAEPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'TFViTMAEForPreTraining',
'TFViTMAEModel',
'TFViTMAEPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
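# A hedged sketch of the deferred-import idea behind _LazyModule, using the module-level
# __getattr__ hook from PEP 562 (an illustration only, not the library's actual
# implementation; _NAME_TO_SUBMODULE is a hypothetical name -> submodule map):
#
# import importlib
#
# def __getattr__(name):
#     if name in _NAME_TO_SUBMODULE:
#         submodule = importlib.import_module("." + _NAME_TO_SUBMODULE[name], __name__)
#         return getattr(submodule, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")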
| 684 | 0 |
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCamelCase : List[Any] = 'scheduler_config.json'
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = 1
_snake_case = 2
_snake_case = 3
_snake_case = 4
_snake_case = 5
_snake_case = 6
_snake_case = 7
_snake_case = 8
_snake_case = 9
_snake_case = 10
_snake_case = 11
_snake_case = 12
_snake_case = 13
_snake_case = 14
@dataclass
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = 42
class __lowercase :
"""simple docstring"""
_snake_case = SCHEDULER_CONFIG_NAME
_snake_case = []
_snake_case = True
@classmethod
def UpperCAmelCase ( cls , A = None , A = None , A=False , **A , ) -> List[Any]:
snake_case : Optional[int] = cls.load_config(
pretrained_model_name_or_path=A , subfolder=A , return_unused_kwargs=A , return_commit_hash=A , **A , )
return cls.from_config(A , return_unused_kwargs=A , **A )
def UpperCAmelCase ( self , A , A = False , **A ) -> Union[str, Any]:
self.save_config(save_directory=A , push_to_hub=A , **A )
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return self._get_compatibles()
@classmethod
def UpperCAmelCase ( cls ) -> Tuple:
snake_case : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) )
snake_case : Optional[Any] = importlib.import_module(__name__.split(""".""" )[0] )
snake_case : int = [
getattr(A , A ) for c in compatible_classes_str if hasattr(A , A )
]
return compatible_classes
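    # Note: _get_compatibles resolves each string in `cls._compatibles` (plus the class's
    # own name) on the top-level package via getattr; names that don't resolve are
    # silently dropped by the hasattr filter above.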
| 703 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 0 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase :
"""simple docstring"""
def __init__( self , A , A=2 , A=8 , A=True , A=True , A=True , A=True , A=9_9 , A=1_6 , A=5 , A=2 , A=3_6 , A="gelu" , A=0.0 , A=0.0 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> List[Any]:
snake_case : Optional[Any] = parent
snake_case : str = batch_size
snake_case : Optional[Any] = seq_length
snake_case : Optional[int] = is_training
snake_case : Dict = use_input_mask
snake_case : Union[str, Any] = use_token_type_ids
snake_case : Tuple = use_labels
snake_case : Any = vocab_size
snake_case : int = hidden_size
snake_case : List[str] = num_hidden_layers
snake_case : Optional[int] = num_attention_heads
snake_case : Any = intermediate_size
snake_case : Union[str, Any] = hidden_act
snake_case : Optional[int] = hidden_dropout_prob
snake_case : Any = attention_probs_dropout_prob
snake_case : List[Any] = max_position_embeddings
snake_case : int = type_vocab_size
snake_case : Dict = type_sequence_label_size
snake_case : Tuple = initializer_range
snake_case : Union[str, Any] = num_labels
snake_case : Optional[Any] = num_choices
snake_case : List[str] = scope
def UpperCAmelCase ( self ) -> str:
snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Optional[Any] = None
if self.use_input_mask:
snake_case : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : List[str] = None
if self.use_token_type_ids:
snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : str = None
snake_case : int = None
snake_case : Optional[int] = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case : str = ids_tensor([self.batch_size] , self.num_choices )
snake_case : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> List[Any]:
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def UpperCAmelCase ( self ) -> Union[str, Any]:
snake_case : Optional[int] = self.get_config()
snake_case : Tuple = 3_0_0
return config
def UpperCAmelCase ( self ) -> Any:
        snake_case : Union[str, Any] = self.prepare_config_and_inputs()
snake_case : Any = True
snake_case : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    model = MraModel(config=config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
    result = model(input_ids , token_type_ids=token_type_ids )
    result = model(input_ids )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
    config.add_cross_attention = True
    model = MraModel(config )
    model.to(torch_device )
    model.eval()
    result = model(
        input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
    result = model(
        input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
    result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
    self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    model = MraForMaskedLM(config=config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    model = MraForQuestionAnswering(config=config )
    model.to(torch_device )
    model.eval()
    result = model(
        input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
    self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
    self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    config.num_labels = self.num_labels
    model = MraForSequenceClassification(config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    config.num_labels = self.num_labels
    model = MraForTokenClassification(config=config )
    model.to(torch_device )
    model.eval()
    result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    config.num_choices = self.num_choices
    model = MraForMultipleChoice(config=config )
    model.to(torch_device )
    model.eval()
    multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    result = model(
        multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

def prepare_config_and_inputs_for_common(self):
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
    return config, inputs_dict
@require_torch
class __lowercase (UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
all_model_classes = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
test_pruning = False
test_headmasking = False
test_torchscript = False
has_attentions = False
all_generative_model_classes = ()
def setUp(self):
    self.model_tester = MraModelTester(self )
    self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=3_7 )

def test_config(self):
    self.config_tester.run_common_tests()

def test_model(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs )

def test_model_various_embeddings(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    for type in ["absolute", "relative_key", "relative_key_query"]:
        config_and_inputs[0].position_embedding_type = type
        self.model_tester.create_and_check_model(*config_and_inputs )

def test_for_masked_lm(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

def test_for_multiple_choice(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

def test_for_question_answering(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

def test_for_sequence_classification(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

def test_for_token_classification(self):
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

@slow
def test_model_from_pretrained(self):
    for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = MraModel.from_pretrained(model_name )
        self.assertIsNotNone(model )

@unittest.skip(reason="""MRA does not output attentions""" )
def test_attention_outputs(self):
    return
@require_torch
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@slow
def test_inference_no_head(self):
    model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
    input_ids = torch.arange(2_5_6 ).unsqueeze(0 )
    with torch.no_grad():
        output = model(input_ids )[0]

    expected_shape = torch.Size((1, 2_5_6, 7_6_8) )
    self.assertEqual(output.shape , expected_shape )

    expected_slice = torch.tensor(
        [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
    self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )

@slow
def test_inference_masked_lm(self):
    model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
    input_ids = torch.arange(2_5_6 ).unsqueeze(0 )
    with torch.no_grad():
        output = model(input_ids )[0]

    vocab_size = 5_0_2_6_5
    expected_shape = torch.Size((1, 2_5_6, vocab_size) )
    self.assertEqual(output.shape , expected_shape )

    expected_slice = torch.tensor(
        [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
    self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )

@slow
def test_inference_masked_lm_long_input(self):
    model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
    input_ids = torch.arange(4_0_9_6 ).unsqueeze(0 )
    with torch.no_grad():
        output = model(input_ids )[0]

    vocab_size = 5_0_2_6_5
    expected_shape = torch.Size((1, 4_0_9_6, vocab_size) )
    self.assertEqual(output.shape , expected_shape )

    expected_slice = torch.tensor(
        [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
    self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 704 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {'vocab_file': 'spm_char.model'}
lowerCamelCase : List[str] = {
'vocab_file': {
'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
}
}
lowerCamelCase : List[Any] = {
'microsoft/speecht5_asr': 1_0_2_4,
'microsoft/speecht5_tts': 1_0_2_4,
'microsoft/speecht5_vc': 1_0_2_4,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__(
    self,
    vocab_file,
    bos_token="""<s>""",
    eos_token="""</s>""",
    unk_token="""<unk>""",
    pad_token="""<pad>""",
    sp_model_kwargs = None,
    **kwargs,
) -> None:
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

    super().__init__(
        bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )

    self.vocab_file = vocab_file
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(vocab_file )

@property
def vocab_size(self):
    return self.sp_model.get_piece_size()

def get_vocab(self):
    vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
    vocab.update(self.added_tokens_encoder )
    return vocab

def __getstate__(self):
    state = self.__dict__.copy()
    state["""sp_model"""] = None
    return state

def __setstate__(self, d):
    self.__dict__ = d

    # for backward compatibility
    if not hasattr(self , """sp_model_kwargs""" ):
        self.sp_model_kwargs = {}

    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(self.vocab_file )
def _tokenize(self, text) -> List[str]:
    return self.sp_model.encode(text , out_type=str )

def _convert_token_to_id(self, token):
    return self.sp_model.piece_to_id(token )

def _convert_id_to_token(self, index):
    token = self.sp_model.IdToPiece(index )
    return token

def convert_tokens_to_string(self, tokens):
    current_sub_tokens = []
    out_string = """"""
    for token in tokens:
        # make sure that special tokens are not decoded using sentencepiece model
        if token in self.all_special_tokens:
            out_string += self.sp_model.decode(current_sub_tokens ) + token
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token )
    out_string += self.sp_model.decode(current_sub_tokens )
    return out_string.strip()
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
    if token_ids_1 is None:
        return token_ids_0 + [self.eos_token_id]
    # We don't expect to process pairs, but leave the pair logic for API consistency
    return token_ids_0 + token_ids_1 + [self.eos_token_id]

def get_special_tokens_mask(self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False) -> List[int]:
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )

    suffix_ones = [1]
    if token_ids_1 is None:
        return ([0] * len(token_ids_0 )) + suffix_ones
    return ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
def save_vocabulary(self, save_directory, filename_prefix = None) -> Tuple[str]:
    if not os.path.isdir(save_directory ):
        logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )

    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file , """wb""" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )

    return (out_vocab_file,)
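# A minimal usage sketch (not part of the original file): load the tokenizer
# defined above -- named `__lowercase` in this dump, `SpeechT5Tokenizer`
# upstream -- and round-trip a short string through encode/decode. Requires
# network access to fetch the checkpoint named in the map above.
if __name__ == "__main__":
    demo_tokenizer = __lowercase.from_pretrained("microsoft/speecht5_asr")
    demo_ids = demo_tokenizer("hello world").input_ids
    print(demo_tokenizer.decode(demo_ids))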
| 684 | 0 |
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )

    maximum = 0

    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
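# A tiny self-checking sketch (not part of the original solution): the same
# four-in-a-row product scan on a hand-made 4x4 grid, where the answer is easy
# to verify by eye -- the main diagonal 1*2*3*4 = 24 beats the top row's 9.
def _max_product_demo() -> int:
    demo = [[1, 1, 1, 9], [0, 2, 0, 1], [0, 0, 3, 1], [0, 0, 0, 4]]
    best = 0
    for row in demo:  # rows only, for brevity
        best = max(best, row[0] * row[1] * row[2] * row[3])
    best = max(best, demo[0][0] * demo[1][1] * demo[2][2] * demo[3][3])
    return best  # 24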
| 705 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Any = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """gpt_neox_japanese"""
def __init__(
    self,
    vocab_size=3_2_0_0_0,
    hidden_size=2_5_6_0,
    num_hidden_layers=3_2,
    num_attention_heads=3_2,
    intermediate_multiple_size=4,
    hidden_act="""gelu""",
    rotary_pct=1.00,
    rotary_emb_base=1_0_0_0_0,
    max_position_embeddings=2_0_4_8,
    initializer_range=0.02,
    layer_norm_eps=1e-5,
    use_cache=True,
    bos_token_id=3_1_9_9_6,
    eos_token_id=3_1_9_9_9,
    attention_dropout=0.1,
    hidden_dropout=0.0,
    **kwargs,
):
    super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
    self.vocab_size = vocab_size
    self.max_position_embeddings = max_position_embeddings
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_multiple_size = intermediate_multiple_size
    self.hidden_act = hidden_act
    self.rotary_pct = rotary_pct
    self.rotary_emb_base = rotary_emb_base
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.use_cache = use_cache
    self.attention_dropout = attention_dropout
    self.hidden_dropout = hidden_dropout
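# A minimal sketch (not part of the original file): instantiate the config
# defined above -- named `__lowercase` in this dump, `GPTNeoXJapaneseConfig`
# upstream -- with one overridden field; the rest keep their defaults.
if __name__ == "__main__":
    demo_config = __lowercase(hidden_size=1_2_8)
    print(demo_config.hidden_size, demo_config.num_hidden_layers)  # 128 32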
| 684 | 0 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname , """r""" , encoding="""utf-8""" ) as f:
        content = f.read()

    lines = content.split("""\n""" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(R"""^(\s*)\S""" , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(""" """ * indent + """(""" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(""" """ * indent + """)""" ):
                        line_idx += 1
                    blocks.append("""\n""".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda block: _re_identifier.search(block ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1

    if overwrite:
        with open(fname , """w""" , encoding="""utf-8""" ) as f:
            f.write("""\n""".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(""".py""" )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]

    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            f"""The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix"""
            """ this.""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
args = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
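# A tiny self-contained check (hypothetical snippet, not part of the script):
# write a deliberately unsorted mapping to a temporary file and confirm that
# sort_auto_mapping flags it when overwrite=False.
def _demo_sort_check() -> bool:
    import tempfile

    unsorted = (
        'DEMO_MAPPING_NAMES = OrderedDict(\n'
        '    [\n'
        '        ("bbb", "B"),\n'
        '        ("aaa", "A"),\n'
        '    ]\n'
        ')\n'
    )
    with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as tmp:
        tmp.write(unsorted)
    return bool(sort_auto_mapping(tmp.name, overwrite=False))  # True -> needs sorting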
| 706 |
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent.
    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-ac")
    -10101100
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("""No value was passed to the function""" )

    is_negative = hex_num[0] == """-"""
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError("""Invalid value was passed to the function""" )

    bin_str = """"""
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1

    return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 684 | 0 |
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # Closed-form tanh: algebraically equal to np.tanh(vector).
    return (2 / (1 + np.exp(-2 * vector ))) - 1
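# A quick equivalence check (not part of the original file): the closed form
# above is identical to np.tanh, so both should agree elementwise.
def _tanh_equivalence_check() -> bool:
    v = np.array([1.0, 5.0, 6.0, -0.67])
    return bool(np.allclose(tangent_hyperbolic(v), np.tanh(v)))  # True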
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""pixel_values"""]
def __init__(
    self,
    do_resize = True,
    size = None,
    resample = PIL.Image.BICUBIC,
    do_center_crop = True,
    crop_size = None,
    rescale_factor = 1 / 2_5_5,
    do_rescale = True,
    do_normalize = True,
    image_mean = None,
    image_std = None,
    **kwargs,
) -> None:
    super().__init__(**kwargs )
    size = size if size is not None else {"""height""": 2_5_6, """width""": 2_5_6}
    size = get_size_dict(size )
    crop_size = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
    crop_size = get_size_dict(crop_size , param_name="""crop_size""" )

    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
    self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize(self, image, size, resample = PIL.Image.BICUBIC, data_format = None, **kwargs) -> np.ndarray:
    size = get_size_dict(size )
    if "height" not in size or "width" not in size:
        raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
    return resize(
        image , size=(size["""height"""], size["""width"""]) , resample=resample , data_format=data_format , **kwargs )

def center_crop(self, image, size, data_format = None, **kwargs) -> np.ndarray:
    size = get_size_dict(size )
    if "height" not in size or "width" not in size:
        raise ValueError(f"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
    return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )

def rescale(self, image, scale, data_format = None, **kwargs):
    return rescale(image , scale=scale , data_format=data_format , **kwargs )

def normalize(self, image, mean, std, data_format = None, **kwargs) -> np.ndarray:
    return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess(
    self,
    images,
    do_resize = None,
    size = None,
    resample=None,
    do_center_crop = None,
    crop_size = None,
    do_rescale = None,
    rescale_factor = None,
    do_normalize = None,
    image_mean = None,
    image_std = None,
    return_tensors = None,
    data_format = ChannelDimension.FIRST,
    **kwargs,
) -> PIL.Image.Image:
    do_resize = do_resize if do_resize is not None else self.do_resize
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    size = size if size is not None else self.size
    size = get_size_dict(size )
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size , param_name="""crop_size""" )

    images = make_list_of_images(images )

    if not valid_images(images ):
        raise ValueError(
            """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
            """torch.Tensor, tf.Tensor or jax.ndarray.""" )

    if do_resize and (size is None or resample is None):
        raise ValueError("""Size and resample must be specified if do_resize is True.""" )

    if do_center_crop and crop_size is None:
        raise ValueError("""Crop size must be specified if do_center_crop is True.""" )

    if do_rescale and rescale_factor is None:
        raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image ) for image in images]

    if do_resize:
        images = [self.resize(image=image , size=size , resample=resample ) for image in images]

    if do_center_crop:
        images = [self.center_crop(image=image , size=crop_size ) for image in images]

    if do_rescale:
        images = [self.rescale(image=image , scale=rescale_factor ) for image in images]

    if do_normalize:
        images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]

    images = [to_channel_dimension_format(image , data_format ) for image in images]

    data = {"""pixel_values""": images}
    return BatchFeature(data=data , tensor_type=return_tensors )
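# A minimal usage sketch (not part of the original module): run the image
# processor defined above -- named `__lowercase` in this dump -- on a random
# image; the default 224x224 center crop fixes the output shape.
def _image_processor_demo() -> None:
    processor = __lowercase()
    image = (np.random.rand(3_0_0, 3_0_0, 3) * 2_5_5).astype("uint8")
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)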
| 684 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowerCamelCase : Any = _symbol_database.Default()
lowerCamelCase : Optional[int] = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = B'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
_globals['_TRAINERSPEC']._serialized_start = 4_5
_globals['_TRAINERSPEC']._serialized_end = 1_5_8_1
_globals['_TRAINERSPEC_MODELTYPE']._serialized_start = 1_5_1_7
_globals['_TRAINERSPEC_MODELTYPE']._serialized_end = 1_5_7_0
_globals['_NORMALIZERSPEC']._serialized_start = 1_5_8_4
_globals['_NORMALIZERSPEC']._serialized_end = 1_7_9_3
_globals['_SELFTESTDATA']._serialized_start = 1_7_9_5
_globals['_SELFTESTDATA']._serialized_end = 1_9_1_6
_globals['_SELFTESTDATA_SAMPLE']._serialized_start = 1_8_6_4
_globals['_SELFTESTDATA_SAMPLE']._serialized_end = 1_9_0_5
_globals['_MODELPROTO']._serialized_start = 1_9_1_9
_globals['_MODELPROTO']._serialized_end = 2_4_2_9
_globals['_MODELPROTO_SENTENCEPIECE']._serialized_start = 2_2_0_8
_globals['_MODELPROTO_SENTENCEPIECE']._serialized_end = 2_4_1_8
_globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_start = 2_3_2_3
_globals['_MODELPROTO_SENTENCEPIECE_TYPE']._serialized_end = 2_4_0_7
# @@protoc_insertion_point(module_scope)
| 708 |
import inspect
import unittest
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def test_diffusers_import(self):
    try:
        import diffusers  # noqa: F401
    except ImportError:
        assert False

def test_backend_registration(self):
    import diffusers
    from diffusers.dependency_versions_table import deps

    all_classes = inspect.getmembers(diffusers , inspect.isclass )

    for cls_name, cls_module in all_classes:
        if "dummy_" in cls_module.__module__:
            for backend in cls_module._backends:
                if backend == "k_diffusion":
                    backend = """k-diffusion"""
                elif backend == "invisible_watermark":
                    backend = """invisible-watermark"""
                assert backend in deps, f"""{backend} is not in the deps table!"""
| 684 | 0 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text )
    my_alphas = list(""" """ + ascii_lowercase )

    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.

    # print entropy
    print(f"""{round(-1 * my_fir_sum ):.1f}""" )

    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0

    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )

    # print second entropy
    print(f"""{round(-1 * my_sec_sum ):.1f}""" )

    # print the difference between them
    print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
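# A worked micro-example (not part of the original file): for the text "abc"
# the counters come out as singles {"c": 1, "a": 1, "b": 1} and pairs
# {" a": 1, "ab": 1, "bc": 1}; the " a" pair comes from the space-prefix
# special case above.
def _analyze_text_demo() -> None:
    singles, pairs = analyze_text("abc")
    assert singles["a"] == 1 and singles["c"] == 1
    assert pairs[" a"] == 1 and pairs["ab"] == 1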
def main() -> None:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
| 709 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def test_module_spec(self):
# If the spec is missing, importlib would not be able to import the module dynamically.
assert transformers.__spec__ is not None
assert importlib.util.find_spec("""transformers""" ) is not None
class __lowercase (unittest.TestCase ):
"""simple docstring"""
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def test_context_managers_no_context(self, mock_stdout):
with ContextManagers([] ):
print("""Transformers are awesome!""" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def test_context_managers_one_context(self, mock_stdout):
with ContextManagers([context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
@unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
def test_context_managers_two_context(self, mock_stdout):
with ContextManagers([context_fr(), context_en()] ):
print("""Transformers are awesome!""" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
@require_torch
def test_find_labels(self):
    self.assertEqual(find_labels(BertForSequenceClassification ) , ["""labels"""] )
    self.assertEqual(find_labels(BertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
    self.assertEqual(find_labels(BertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )

    # find_labels inspects the forward signature, so subclasses work too.
    class DummyModel(BertForSequenceClassification):
        pass

    self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_tf
def test_find_labels_tf(self):
    self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["""labels"""] )
    self.assertEqual(find_labels(TFBertForPreTraining ) , ["""labels""", """next_sentence_label"""] )
    self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["""start_positions""", """end_positions"""] )

    class DummyModel(TFBertForSequenceClassification):
        pass

    self.assertEqual(find_labels(DummyModel ) , ["""labels"""] )
@require_flax
def test_find_labels_flax(self):
    # Flax models don't have labels
    self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
    self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
    self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )

    class DummyModel(FlaxBertForSequenceClassification):
        pass

    self.assertEqual(find_labels(DummyModel ) , [] )
| 684 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class __lowercase (unittest.TestCase , ToolTesterMixin ):
    """simple docstring"""
    def setUp(self):
        self.tool = load_tool("""text-classification""" )
        self.tool.setup()
        self.remote_tool = load_tool("""text-classification""" , remote=True )

    def test_exact_match_arg(self):
        result = self.tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )

    def test_exact_match_kwarg(self):
        result = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
| 710 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """audio-spectrogram-transformer"""
def __init__(
    self,
    hidden_size=7_6_8,
    num_hidden_layers=1_2,
    num_attention_heads=1_2,
    intermediate_size=3_0_7_2,
    hidden_act="""gelu""",
    hidden_dropout_prob=0.0,
    attention_probs_dropout_prob=0.0,
    initializer_range=0.02,
    layer_norm_eps=1e-1_2,
    patch_size=1_6,
    qkv_bias=True,
    frequency_stride=1_0,
    time_stride=1_0,
    max_length=1_0_2_4,
    num_mel_bins=1_2_8,
    **kwargs,
):
    super().__init__(**kwargs )
    self.hidden_size = hidden_size
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.initializer_range = initializer_range
    self.layer_norm_eps = layer_norm_eps
    self.patch_size = patch_size
    self.qkv_bias = qkv_bias
    self.frequency_stride = frequency_stride
    self.time_stride = time_stride
    self.max_length = max_length
    self.num_mel_bins = num_mel_bins
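# A minimal sketch (not part of the original file): build the config defined
# above -- named `__lowercase` in this dump, `ASTConfig` upstream -- with one
# overridden field; everything else keeps its default.
if __name__ == "__main__":
    demo_config = __lowercase(num_mel_bins=6_4)
    print(demo_config.num_mel_bins, demo_config.patch_size)  # 64 16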
| 684 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'
IMPORT_IN_FUNCTION = '\ndef foo():\n import os\n return False\n'
DEEPLY_NESTED_IMPORT = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , CASES )
def test_import_parsing(tmp_path , case ):
    tmp_file_path = os.path.join(tmp_path , """test_file.py""" )
    with open(tmp_file_path , """w""" ) as _tmp_file:
        _tmp_file.write(case )

    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 711 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCamelCase : Any = logging.get_logger(__name__)
class __lowercase (enum.Enum ):
"""simple docstring"""
_snake_case = 0
_snake_case = 1
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """generated"""
def __init__(self, *args, **kwargs):
    super().__init__(*args , **kwargs )

    self.check_model_type(
        TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        if self.framework == """tf"""
        else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _sanitize_parameters(
    self,
    return_tensors=None,
    return_type=None,
    clean_up_tokenization_spaces=None,
    truncation=None,
    stop_sequence=None,
    **generate_kwargs,
):
    preprocess_params = {}
    if truncation is not None:
        preprocess_params["""truncation"""] = truncation

    forward_params = generate_kwargs

    postprocess_params = {}
    if return_tensors is not None and return_type is None:
        return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
    if return_type is not None:
        postprocess_params["""return_type"""] = return_type

    if clean_up_tokenization_spaces is not None:
        postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces

    if stop_sequence is not None:
        stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
        if len(stop_sequence_ids ) > 1:
            warnings.warn(
                """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                """ the stop sequence will be used as the stop sequence string in the interim.""" )
        generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]

    return preprocess_params, forward_params, postprocess_params
def check_inputs(self, input_length: int, min_length: int, max_length: int):
    """Checks whether there might be something wrong with given input with regard to the model."""
    return True
def _parse_and_tokenize(self, *args, truncation):
    prefix = self.model.config.prefix if self.model.config.prefix is not None else """"""
    if isinstance(args[0] , list ):
        if self.tokenizer.pad_token_id is None:
            raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
        args = ([prefix + arg for arg in args[0]],)
        padding = True

    elif isinstance(args[0] , str ):
        args = (prefix + args[0],)
        padding = False
    else:
        raise ValueError(
            f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
    inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
    # This is produced by tokenizers but is an invalid generate kwargs
    if "token_type_ids" in inputs:
        del inputs["token_type_ids"]
    return inputs
def __call__(self, *args, **kwargs):
    result = super().__call__(*args , **kwargs )
    if (
        isinstance(args[0] , list )
        and all(isinstance(el , str ) for el in args[0] )
        and all(len(res ) == 1 for res in result )
    ):
        return [res[0] for res in result]
    return result

def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
    inputs = self._parse_and_tokenize(inputs , truncation=truncation , **kwargs )
    return inputs
def _forward(self, model_inputs, **generate_kwargs):
    if self.framework == "pt":
        in_b, input_length = model_inputs["""input_ids"""].shape
    elif self.framework == "tf":
        in_b, input_length = tf.shape(model_inputs["""input_ids"""] ).numpy()

    generate_kwargs["""min_length"""] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
    generate_kwargs["""max_length"""] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
    self.check_inputs(input_length , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
    output_ids = self.model.generate(**model_inputs , **generate_kwargs )
    out_b = output_ids.shape[0]
    if self.framework == "pt":
        output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
    elif self.framework == "tf":
        output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
    return {"output_ids": output_ids}
def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
    records = []
    for output_ids in model_outputs["output_ids"][0]:
        if return_type == ReturnType.TENSORS:
            record = {f"""{self.return_name}_token_ids""": output_ids}
        elif return_type == ReturnType.TEXT:
            record = {
                f"""{self.return_name}_text""": self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
            }
        records.append(record )
    return records
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """summary"""
def __call__(self, *args, **kwargs):
    return super().__call__(*args , **kwargs )

def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
    if max_length < min_length:
        logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )

    if input_length < max_length:
        logger.warning(
            f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
            """a summarization task, where outputs shorter than the input are typically wanted, you might """
            f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(UpperCamelCase__ )
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = """translation"""
def check_inputs(self, input_length: int, min_length: int, max_length: int):
    if input_length > 0.9 * max_length:
        logger.warning(
            f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
            """increasing your max_length manually, e.g. translator('...', max_length=400)""" )
    return True

def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
    if getattr(self.tokenizer , """_build_translation_inputs""" , None ):
        return self.tokenizer._build_translation_inputs(
            *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang )
    else:
        return super()._parse_and_tokenize(*args , truncation=truncation )

def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
    preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs )
    if src_lang is not None:
        preprocess_params["""src_lang"""] = src_lang
    if tgt_lang is not None:
        preprocess_params["""tgt_lang"""] = tgt_lang
    if src_lang is None and tgt_lang is None:
        # Backward compatibility, direct arguments use is preferred.
        task = kwargs.get("""task""" , self.task )
        items = task.split("""_""" )
        if task and len(items ) == 4:
            # translation, XX, to YY
            preprocess_params["""src_lang"""] = items[1]
            preprocess_params["""tgt_lang"""] = items[3]
    return preprocess_params, forward_params, postprocess_params
def __call__(self, *args, **kwargs):
    return super().__call__(*args , **kwargs )
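# A minimal usage sketch (hypothetical, not part of the original module): the
# classes above back `pipeline("summarization")` and
# `pipeline("translation_en_to_fr")` in transformers. Kept as a function
# because calling it downloads a default checkpoint.
def _pipeline_usage_sketch() -> None:
    from transformers import pipeline

    translator = pipeline("translation_en_to_fr")
    print(translator("How old are you?", max_length=40))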
| 684 | 0 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def vocab_size(self):
    return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )

def get_vocab(self):
    return {
        """artists_encoder""": self.artists_encoder,
        """genres_encoder""": self.genres_encoder,
        """lyrics_encoder""": self.lyrics_encoder,
    }
def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
    artists_id = [self.artists_encoder.get(artist , 0 ) for artist in list_artists]
    for genres in range(len(list_genres ) ):
        list_genres[genres] = [self.genres_encoder.get(genre , 0 ) for genre in list_genres[genres]]
        list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))

    lyric_ids = [[self.lyrics_encoder.get(character , 0 ) for character in list_lyrics[0]], [], []]
    return artists_id, list_genres, lyric_ids

def _tokenize(self, lyrics):
    return list(lyrics )

def tokenize(self, artist, genre, lyrics, **kwargs):
    artist, genre, lyrics = self.prepare_for_tokenization(artist , genre , lyrics )
    lyrics = self._tokenize(lyrics )
    return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
| 712 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(R"""#.*""" , """""" , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = """\n""".join(filtered_lines )

    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""" )
    return sha256(full_bytes ).hexdigest()
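# A small illustration (not part of the original file): the hash strips
# comments and blank lines first, so these two snippets share one digest.
def _hash_demo() -> bool:
    a = _hash_python_lines(["x = 1  # set x", "", "y = 2"])
    b = _hash_python_lines(["x = 1  ", "y = 2"])
    return a == b  # True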
# get importable module names and hash for caching
lowerCamelCase : Any = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowerCamelCase : Optional[int] = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
lowerCamelCase : Tuple = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
lowerCamelCase : Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 684 | 0 |
def longest_common_subsequence(x: str, y: str) -> tuple[int, str]:
    assert x is not None
    assert y is not None

    m = len(x )
    n = len(y )

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741

    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )

    seq = """"""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq
if __name__ == "__main__":
lowerCamelCase : Tuple = 'AGGTAB'
lowerCamelCase : int = 'GXTXAYB'
lowerCamelCase : str = 4
lowerCamelCase : List[Any] = 'GTAB'
lowerCamelCase : List[str] = longest_common_subsequence(a, b)
print('len =', ln, ', sub-sequence =', subseq)
import doctest
doctest.testmod()
| 713 |
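# The table above costs O(m*n) memory because it also supports backtracking the actual
# subsequence. When only the length is needed, two rolling rows suffice; a sketch of
# that space optimization (`lcs_length` is a hypothetical helper, not part of the file
# above):
def lcs_length(x: str, y: str) -> int:
    prev = [0] * (len(y) + 1)
    for i in range(1, len(x) + 1):
        curr = [0] * (len(y) + 1)
        for j in range(1, len(y) + 1):
            if x[i - 1] == y[j - 1]:
                curr[j] = prev[j - 1] + 1  # extend the common subsequence
            else:
                curr[j] = max(prev[j], curr[j - 1])
        prev = curr
    return prev[len(y)]

assert lcs_length("AGGTAB", "GXTXAYB") == 4  # matches the demo above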
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 684 | 0 |
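# A typical invocation of the conversion script above (the checkpoint and config
# paths are placeholders for illustration, not real files):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin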
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 714 |
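# The classes above are import-time placeholders: when flax is missing, the library
# exposes these dummies under the real model names so imports still succeed, and a
# clear error is raised only when a class is actually used. A minimal sketch of the
# idea (`RequiresFlax` and `FlaxPlaceholderModel` are illustrative names, simplified
# from the real DummyObject/requires_backends machinery):
class RequiresFlax(type):
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)  # let dunder/introspection access through
        raise ImportError(f"{cls.__name__} requires the flax library, which is not installed.")

class FlaxPlaceholderModel(metaclass=RequiresFlax):
    pass

# FlaxPlaceholderModel.from_pretrained("...")  ->  ImportError with an actionable message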
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Any:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> str:
requires_backends(cls , ["""flax"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
"""simple docstring"""
_snake_case = ["""flax"""]
def __init__( self , *A , **A ) -> Tuple:
requires_backends(self , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Dict:
requires_backends(cls , ["""flax"""] )
@classmethod
def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
| 684 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance preserving stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2_000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 715 |
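# A sketch of how the reverse-SDE scheduler above is typically driven: start from
# Gaussian noise and repeatedly apply step_pred with a score estimate. `score_model`
# is a stand-in for any network that returns a score; this is an illustration, not a
# full diffusers pipeline.
import torch

def sample(score_model, scheduler, shape, num_inference_steps=1000, device="cpu"):
    scheduler.set_timesteps(num_inference_steps, device=device)
    x = torch.randn(shape, device=device)  # start from pure noise
    for t in scheduler.timesteps:
        t_batch = t * torch.ones(shape[0], device=device)
        score = score_model(x, t_batch)            # model estimate of the score
        x, x_mean = scheduler.step_pred(score, x, t_batch)
    return x_mean  # the final denoised mean is returned as the sample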
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
| 684 | 0 |
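# How the generated keys are used: ElGamal encrypts m with an ephemeral secret k as
# (c1, c2) = (g**k mod p, m * h**k mod p), where h = g**d mod p is the public part,
# and decrypts via m = c2 * (c1**d)**(-1) mod p. A toy sketch with small numbers
# (illustration only — real use needs large primes, a proper generator, and padding):
p, g, d = 467, 2, 127                      # toy prime, generator, private key
h = pow(g, d, p)                           # public component g^d mod p
m, k = 100, 213                            # message and ephemeral secret
c1, c2 = pow(g, k, p), (m * pow(h, k, p)) % p
recovered = (c2 * pow(pow(c1, d, p), -1, p)) % p   # modular inverse needs Python 3.8+
assert recovered == m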
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_dpt'] = ['DPTFeatureExtractor']
    _import_structure['image_processing_dpt'] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_dpt'] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 716 |
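# The _LazyModule above defers importing torch/vision-heavy submodules until an
# attribute is first touched. A minimal sketch of the mechanism (the real _LazyModule
# also proxies submodules, supports __dir__, and carries a module spec):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> defining submodule, flattened from the structure dict
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the submodule import happens only once
        return value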
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Returns base**exponent % modulo_value by recursive fast exponentiation."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Project Euler 188: last `digits` digits of the hyperexponentiation base↑↑height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)

    return result
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
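# Sanity checks for the helpers above on small values (3↑↑2 = 3**3 = 27 and
# 3↑↑3 = 3**27), kept tiny so they run instantly:
assert _modexpt(3, 3, 10**8) == 27
assert solution(base=3, height=3, digits=8) == pow(3, 27, 10**8)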
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase ( self ) -> Optional[int]:
snake_case : str = 1
snake_case : Union[str, Any] = 3
snake_case : Any = (3_2, 3_2)
snake_case : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase ( self ) -> Dict:
torch.manual_seed(0 )
snake_case : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=A , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def UpperCAmelCase ( self ) -> str:
torch.manual_seed(0 )
snake_case : Tuple = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def UpperCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
return CLIPTextModel(A )
def UpperCAmelCase ( self ) -> Tuple:
snake_case : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Any = self.dummy_cond_unet_upscale
snake_case : Optional[int] = DDPMScheduler()
snake_case : Optional[int] = DDIMScheduler(prediction_type="""v_prediction""" )
snake_case : Optional[Any] = self.dummy_vae
snake_case : str = self.dummy_text_encoder
snake_case : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : Tuple = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : Any = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
snake_case : Dict = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_5_0 , )
snake_case : Union[str, Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Dict = """A painting of a squirrel eating a burger"""
snake_case : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
snake_case : Any = sd_pipe(
[prompt] , image=A , generator=A , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , )
snake_case : Union[str, Any] = output.images
snake_case : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
snake_case : List[str] = sd_pipe(
[prompt] , image=A , generator=A , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , return_dict=A , )[0]
snake_case : str = image[0, -3:, -3:, -1]
snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
snake_case : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
snake_case : Union[str, Any] = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self ) -> Dict:
snake_case : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
snake_case : Dict = self.dummy_cond_unet_upscale
snake_case : Any = DDPMScheduler()
snake_case : Union[str, Any] = DDIMScheduler(prediction_type="""v_prediction""" )
snake_case : List[str] = self.dummy_vae
snake_case : List[Any] = self.dummy_text_encoder
snake_case : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : str = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : Optional[Any] = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_5_0 , )
snake_case : Dict = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : List[str] = """A painting of a squirrel eating a burger"""
snake_case : str = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , )
snake_case : Union[str, Any] = output.images
assert image.shape[0] == 2
snake_case : Optional[int] = torch.Generator(device=A ).manual_seed(0 )
snake_case : Tuple = sd_pipe(
[prompt] , image=A , generator=A , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="""np""" , )
snake_case : Tuple = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def UpperCAmelCase ( self ) -> str:
snake_case : str = self.dummy_cond_unet_upscale
snake_case : Tuple = DDPMScheduler()
snake_case : str = DDIMScheduler(prediction_type="""v_prediction""" )
snake_case : str = self.dummy_vae
snake_case : List[Any] = self.dummy_text_encoder
snake_case : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
snake_case : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case : Optional[Any] = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
snake_case : Any = unet.half()
snake_case : List[str] = text_encoder.half()
# make sure here that pndm scheduler skips prk
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_5_0 , )
snake_case : Dict = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
snake_case : Optional[int] = """A painting of a squirrel eating a burger"""
snake_case : Dict = torch.manual_seed(0 )
snake_case : Tuple = sd_pipe(
[prompt] , image=A , generator=A , num_inference_steps=2 , output_type="""np""" , ).images
snake_case : Optional[Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
snake_case : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
snake_case : Optional[int] = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case : Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
snake_case : Any = """a cat sitting on a park bench"""
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : str = pipe(
prompt=A , image=A , generator=A , output_type="""np""" , )
snake_case : List[Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def UpperCAmelCase ( self ) -> Dict:
snake_case : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
snake_case : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
snake_case : Union[str, Any] = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case : int = StableDiffusionUpscalePipeline.from_pretrained(
A , torch_dtype=torch.floataa , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
snake_case : int = """a cat sitting on a park bench"""
snake_case : Union[str, Any] = torch.manual_seed(0 )
snake_case : List[str] = pipe(
prompt=A , image=A , generator=A , output_type="""np""" , )
snake_case : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def UpperCAmelCase ( self ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
snake_case : int = """stabilityai/stable-diffusion-x4-upscaler"""
snake_case : Any = StableDiffusionUpscalePipeline.from_pretrained(
A , torch_dtype=torch.floataa , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case : List[Any] = """a cat sitting on a park bench"""
snake_case : Optional[Any] = torch.manual_seed(0 )
snake_case : List[Any] = pipe(
prompt=A , image=A , generator=A , num_inference_steps=5 , output_type="""np""" , )
snake_case : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
| 717 |
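# The tests above exercise StableDiffusionUpscalePipeline end to end; for reference,
# a typical GPU usage sketch of the released x4 upscaler checkpoint named in the slow
# tests (illustration, not part of the test file):
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")
low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale/low_res_cat.png"
)
image = pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]
image.save("upscaled_cat.png")  # output is 4x the input resolution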
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 | 0 |
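# Worked check of the enumeration above: 9 four-sided dice give 4**9 = 262144 ordered
# outcomes with totals 9..36, 6 six-sided dice give 6**6 = 46656 outcomes with totals
# 6..36, and Peter wins exactly when his total is strictly greater. The value this
# computes is the published Project Euler 205 answer:
assert solution() == 0.5731441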
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a LUKE model.
    """

    model_type = "luke"

    def __init__(
        self,
        vocab_size=50_267,
        entity_vocab_size=500_000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 718 |
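# Instantiating the configuration above with no arguments reproduces the luke-base
# hyper-parameters; individual fields can be overridden (a small sketch):
config = LukeConfig(entity_emb_size=128, use_entity_aware_attention=False)
assert config.hidden_size == 768 and config.entity_emb_size == 128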
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 684 | 0 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(
                    1,
                    clip_num_channels,
                    clip_image_size,
                    clip_image_size,
                ).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )

    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=1_4,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
args = parser.parse_args()

convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 719 |
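# After the export, the output directory can be loaded back without any PyTorch
# weights ("./sd_onnx" is a placeholder path; the provider choice depends on hardware):
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained("./sd_onnx", provider="CPUExecutionProvider")
image = pipe("a photo of an astronaut riding a horse").images[0]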
import os
def solution():
    """Find the greatest product of four adjacent numbers (right, down, or
    diagonally) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

        maximum = 0

        # right
        for i in range(20):
            for j in range(17):
                temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
                if temp > maximum:
                    maximum = temp

        # down
        for i in range(17):
            for j in range(20):
                temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
                if temp > maximum:
                    maximum = temp

        # diagonal 1
        for i in range(17):
            for j in range(17):
                temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
                if temp > maximum:
                    maximum = temp

        # diagonal 2
        for i in range(17):
            for j in range(3, 20):
                temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
                if temp > maximum:
                    maximum = temp
        return maximum
if __name__ == "__main__":
print(solution())
| 684 | 0 |
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Evaluate how similar the item is to the target by counting every gene
    that sits in the right position."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice and combine two strings at a random point."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    """Mutate a random gene of a child with another one from the list."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    """Select the second parent and generate new population."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution until the target string is reproduced."""
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
)
    genes_list = list(
' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
)
    generation, population, target = basic(target_str, genes_list)
print(
f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
| 720 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place by scanning in both directions on every pass."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 0 |
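# Quick checks of the sort above (in-place, like bubble sort, but alternating a
# forward and a backward pass so "turtles" move quickly in both directions):
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) == [-4, 0, 1, 2, 5, 11]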
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write first n lines of each file f in src_dir to dest_dir/f."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
| 721 |
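# Because fire.Fire(minify) is the entry point, positional CLI arguments map directly
# onto the function parameters (the paths and line count below are placeholders):
#
#   python minify_dataset.py ./wmt_en_ro ./wmt_en_ro_tiny 32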
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Any = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase : Any = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase : Optional[int] = {
'jukebox': 5_1_2,
}
class __lowercase (UpperCamelCase__ ):
"""simple docstring"""
_snake_case = VOCAB_FILES_NAMES
_snake_case = PRETRAINED_VOCAB_FILES_MAP
_snake_case = PRETRAINED_LYRIC_TOKENS_SIZES
_snake_case = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A , A , A=["v3", "v2", "v2"] , A=5_1_2 , A=5 , A="<|endoftext|>" , **A , ) -> Optional[Any]:
snake_case : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
super().__init__(
unk_token=A , n_genres=A , version=A , max_n_lyric_tokens=A , **A , )
snake_case : Optional[Any] = version
snake_case : Optional[Any] = max_n_lyric_tokens
snake_case : Tuple = n_genres
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : Union[str, Any] = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : str = json.load(A )
with open(A , encoding="""utf-8""" ) as vocab_handle:
snake_case : List[str] = json.load(A )
snake_case : Tuple = r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 7_9:
snake_case : Optional[Any] = oov.replace(r"""\-'""" , r"""\-+'""" )
snake_case : Optional[Any] = regex.compile(A )
snake_case : Optional[Any] = {v: k for k, v in self.artists_encoder.items()}
snake_case : int = {v: k for k, v in self.genres_encoder.items()}
snake_case : List[Any] = {v: k for k, v in self.lyrics_encoder.items()}
@property
def UpperCAmelCase ( self ) -> Optional[Any]:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def UpperCAmelCase ( self ) -> str:
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def UpperCAmelCase ( self , A , A , A ) -> Optional[Any]:
snake_case : Optional[int] = [self.artists_encoder.get(A , 0 ) for artist in list_artists]
for genres in range(len(A ) ):
snake_case : Optional[int] = [self.genres_encoder.get(A , 0 ) for genre in list_genres[genres]]
snake_case : Union[str, Any] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
snake_case : Optional[Any] = [[self.lyrics_encoder.get(A , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def UpperCAmelCase ( self , A ) -> List[str]:
return list(A )
def UpperCAmelCase ( self , A , A , A , **A ) -> List[str]:
snake_case , snake_case , snake_case : Any = self.prepare_for_tokenization(A , A , A )
snake_case : Tuple = self._tokenize(A )
return artist, genre, lyrics
def UpperCAmelCase ( self , A , A , A , A = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
snake_case : Tuple = artists[idx].lower()
snake_case : List[Any] = [genres[idx].lower()]
else:
snake_case : Union[str, Any] = self._normalize(artists[idx] ) + """.v2"""
snake_case : Any = [
self._normalize(A ) + """.v2""" for genre in genres[idx].split("""_""" )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
snake_case : str = regex.compile(r"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""" )
snake_case : Dict = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
snake_case : Union[str, Any] = {vocab[index]: index + 1 for index in range(len(A ) )}
snake_case : Optional[int] = 0
snake_case : Union[str, Any] = len(A ) + 1
snake_case : Optional[int] = self.vocab
snake_case : str = {v: k for k, v in self.vocab.items()}
snake_case : int = """"""
else:
snake_case : Optional[int] = regex.compile(r"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""" )
snake_case : int = self._run_strip_accents(A )
snake_case : Any = lyrics.replace("""\\""" , """\n""" )
snake_case : Tuple = self.out_of_vocab.sub("""""" , A ), [], []
return artists, genres, lyrics
def UpperCAmelCase ( self , A ) -> List[Any]:
snake_case : int = unicodedata.normalize("""NFD""" , A )
snake_case : int = []
for char in text:
snake_case : Optional[Any] = unicodedata.category(A )
if cat == "Mn":
continue
output.append(A )
return "".join(A )
def UpperCAmelCase ( self , A ) -> str:
snake_case : Dict = (
[chr(A ) for i in range(ord("""a""" ) , ord("""z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""A""" ) , ord("""Z""" ) + 1 )]
+ [chr(A ) for i in range(ord("""0""" ) , ord("""9""" ) + 1 )]
+ ["""."""]
)
snake_case : Dict = frozenset(A )
snake_case : Dict = re.compile(r"""_+""" )
snake_case : str = """""".join([c if c in accepted else """_""" for c in text.lower()] )
snake_case : List[Any] = pattern.sub("""_""" , A ).strip("""_""" )
return text
def UpperCAmelCase ( self , A ) -> str:
return " ".join(A )
def UpperCAmelCase ( self , A , A = None , A = False ) -> List[Any]:
# Convert to TensorType
if not isinstance(A , A ):
snake_case : Tuple = TensorType(A )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
"""Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""" )
import tensorflow as tf
snake_case : Union[str, Any] = tf.constant
snake_case : int = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""" )
import torch
snake_case : List[str] = torch.tensor
snake_case : Optional[Any] = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""" )
import jax.numpy as jnp # noqa: F811
snake_case : Optional[int] = jnp.array
snake_case : Dict = _is_jax
else:
snake_case : List[str] = np.asarray
snake_case : Tuple = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
snake_case : Any = [inputs]
if not is_tensor(A ):
snake_case : List[Any] = as_tensor(A )
except: # noqa E722
raise ValueError(
"""Unable to create tensor, you should probably activate truncation and/or padding """
"""with 'padding=True' 'truncation=True' to have batched tensors with the same length.""" )
return inputs
def __call__( self , A , A , A="" , A="pt" ) -> BatchEncoding:
snake_case : List[str] = [0, 0, 0]
snake_case : List[str] = [artist] * len(self.version )
snake_case : List[Any] = [genres] * len(self.version )
snake_case , snake_case , snake_case : Optional[int] = self.tokenize(A , A , A )
snake_case , snake_case , snake_case : int = self._convert_token_to_id(A , A , A )
snake_case : Any = [-INFINITY] * len(full_tokens[-1] )
snake_case : int = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=A )
for i in range(len(self.version ) )
]
return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks} )
def UpperCAmelCase ( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=A ) )
snake_case : Any = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=A ) )
snake_case : Tuple = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""] )
with open(A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=A ) )
return (artists_file, genres_file, lyrics_file)
def UpperCAmelCase ( self , A , A , A ) -> List[Any]:
snake_case : Optional[int] = self.artists_decoder.get(A )
snake_case : Optional[Any] = [self.genres_decoder.get(A ) for genre in genres_index]
snake_case : Optional[int] = [self.lyrics_decoder.get(A ) for character in lyric_index]
return artist, genres, lyrics
| 684 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a GPTSAN-japanese model.
    """

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36_000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35_998,
        pad_token_id=35_995,
        eos_token_id=35_999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 700 |
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 684 | 0 |