| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
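The rows below are rendered flat, one column after another, with the integer columns appearing as `| value |` separators between the code cells. A minimal sketch of loading such a dataset programmatically — the dataset id is not given on this page, so the path here is hypothetical:

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical dataset id
row = ds[0]
print(sorted(row))  # ['code', 'code_codestyle', 'label', 'style_context', 'style_context_codestyle']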
"""Detect whether a singly linked list contains a loop."""
from __future__ import annotations

from typing import Any


class ContainsLoopError(Exception):
    """Raised when iteration revisits a node, i.e. the list contains a loop."""


class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next_node: Node | None = None

    def __iter__(self):
        node: Node | None = self
        visited = []
        while node:
            if node in visited:
                raise ContainsLoopError
            visited.append(node)
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if iterating from this node ever revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True


if __name__ == "__main__":
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False

    root_node.next_node.next_node.next_node = root_node.next_node
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
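The `visited` list above costs O(n) extra memory and O(n²) time. A constant-memory alternative is Floyd's tortoise-and-hare cycle detection; a minimal sketch against the same `Node` class (an editorial addition, not part of the original file):

def has_loop_floyd(head: Node | None) -> bool:
    # The fast pointer moves two steps per iteration, the slow one moves one;
    # they can only meet again if the list contains a cycle.
    slow = fast = head
    while fast and fast.next_node:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False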
| 314 |
"""Project Euler problem 107: maximum saving from reducing a network to its minimum spanning tree."""
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """An undirected weighted graph; edges are stored with sorted endpoints."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge, registering both endpoints as vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Grow a minimum spanning tree from an arbitrary start vertex."""
        subgraph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel larger than any real weight.
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint is already inside the subgraph.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int
    with open(network_file) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]
    # Only read the lower triangle; the matrix is symmetric and "-" marks no edge.
    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])
    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
| 314 | 1 |
"""Base16 (hexadecimal) encoding and decoding, per RFC 3548 section 6."""


def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
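A round-trip sanity check for the pair above:

assert base16_encode(b"Hello!") == "48656C6C6F21"
assert base16_decode("48656C6C6F21") == b"Hello!"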
| 717 |
"""Tokenization classes for XGLM, built on SentencePiece."""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}


class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
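To make the alignment table in `__init__` concrete, the id arithmetic works out as follows (plain arithmetic that follows the table; no SentencePiece model file is needed):

# fairseq reserves ids 0-3 for <s>, <pad>, </s>, <unk>; spm gives ',' id 3,
# so every spm id is shifted by fairseq_offset = 1:
fairseq_offset = 1
spm_id_of_comma = 3  # per the table above
print(spm_id_of_comma + fairseq_offset)  # 4, the fairseq id of ','
# The seven <madeupword{i}> tokens are then appended after the shifted spm vocab.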
| 489 | 0 |
import os
import tempfile
import unittest

from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        DistilBertForMaskedLM,
        DistilBertForMultipleChoice,
        DistilBertForQuestionAnswering,
        DistilBertForSequenceClassification,
        DistilBertForTokenClassification,
        DistilBertModel,
    )


class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))


@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
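The `test_torchscript_device_change` test above traces each model on CPU, saves the trace, and reloads it on the GPU. A minimal standalone sketch of the same pattern with a tiny, hypothetical config (`torchscript=True` makes the model return tuples, which `torch.jit.trace` requires):

import torch
from transformers import DistilBertConfig, DistilBertModel

config = DistilBertConfig(dim=32, n_layers=2, n_heads=2, hidden_dim=37, torchscript=True)
model = DistilBertModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (1, 8))
attention_mask = torch.ones_like(input_ids)
traced = torch.jit.trace(model, (input_ids, attention_mask))  # runs on CPU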
| 323 |
import itertools
import json
import os
import unittest

from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
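The space-encoding and offset tests above hinge on RoBERTa's byte-level BPE marking a leading space as `Ġ`. A short illustration (downloads `roberta-base`; in some library versions `add_prefix_space` must be passed to the constructor rather than to `tokenize`):

from transformers import RobertaTokenizer

tok = RobertaTokenizer.from_pretrained("roberta-base")
print(tok.tokenize("hello"))                         # ['hello']
print(tok.tokenize("hello", add_prefix_space=True))  # ['Ġhello']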
| 323 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class a__ :
snake_case__ = 4_2 # [batch_size x 3]
snake_case__ = 4_2 # [batch_size x 3]
snake_case__ = 4_2 # [batch_size x 3]
snake_case__ = 4_2 # [batch_size x 3]
snake_case__ = 4_2
snake_case__ = 4_2
snake_case__ = 4_2
snake_case__ = 4_2
snake_case__ = 4_2
def __UpperCamelCase ( self : Optional[int]) -> List[str]:
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
def __UpperCamelCase ( self : int) -> Any:
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] ,dtype=np.floataa))
def __UpperCamelCase ( self : Any) -> List[str]:
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] ,dtype=np.floataa))
def __UpperCamelCase ( self : int) -> torch.Tensor:
"""simple docstring"""
_lowerCAmelCase:int = torch.arange(self.height * self.width)
_lowerCAmelCase:str = torch.stack(
[
pixel_indices % self.width,
torch.div(a__ ,self.width ,rounding_mode='''trunc'''),
] ,axis=1 ,)
return coords
@property
def __UpperCamelCase ( self : int) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Dict = self.shape
_lowerCAmelCase:List[Any] = int(np.prod(a__))
_lowerCAmelCase:List[str] = self.get_image_coords()
_lowerCAmelCase:Union[str, Any] = torch.broadcast_to(coords.unsqueeze(0) ,[batch_size * inner_batch_size, *coords.shape])
_lowerCAmelCase:str = self.get_camera_rays(a__)
_lowerCAmelCase:Tuple = rays.view(a__ ,inner_batch_size * self.height * self.width ,2 ,3)
return rays
def __UpperCamelCase ( self : Dict ,a__ : torch.Tensor) -> torch.Tensor:
"""simple docstring"""
_lowerCAmelCase:Optional[Any] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_lowerCAmelCase:int = coords.view(a__ ,-1 ,2)
_lowerCAmelCase:Optional[int] = self.resolution()
_lowerCAmelCase:Union[str, Any] = self.fov()
_lowerCAmelCase:Tuple = (flat.float() / (res - 1)) * 2 - 1
_lowerCAmelCase:Union[str, Any] = fracs * torch.tan(fov / 2)
_lowerCAmelCase:str = fracs.view(a__ ,-1 ,2)
_lowerCAmelCase:Tuple = (
self.z.view(a__ ,1 ,3)
+ self.x.view(a__ ,1 ,3) * fracs[:, :, :1]
+ self.y.view(a__ ,1 ,3) * fracs[:, :, 1:]
)
_lowerCAmelCase:str = directions / directions.norm(dim=-1 ,keepdim=a__)
_lowerCAmelCase:List[str] = torch.stack(
[
torch.broadcast_to(self.origin.view(a__ ,1 ,3) ,[batch_size, directions.shape[1], 3]),
directions,
] ,dim=2 ,)
return rays.view(a__ ,*a__ ,2 ,3)
def __UpperCamelCase ( self : int ,a__ : int ,a__ : int) -> "DifferentiableProjectiveCamera":
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin ,x=self.x ,y=self.y ,z=self.z ,width=a__ ,height=a__ ,x_fov=self.x_fov ,y_fov=self.y_fov ,)
def UpperCAmelCase ( snake_case : int ):
_lowerCAmelCase:Dict = []
_lowerCAmelCase:int = []
_lowerCAmelCase:Union[str, Any] = []
_lowerCAmelCase:Union[str, Any] = []
for theta in np.linspace(0 , 2 * np.pi , num=20 ):
_lowerCAmelCase:int = np.array([np.sin(snake_case ), np.cos(snake_case ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_lowerCAmelCase:Union[str, Any] = -z * 4
_lowerCAmelCase:List[str] = np.array([np.cos(snake_case ), -np.sin(snake_case ), 0.0] )
_lowerCAmelCase:int = np.cross(snake_case , snake_case )
origins.append(snake_case )
xs.append(snake_case )
ys.append(snake_case )
zs.append(snake_case )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(snake_case , axis=0 ) ).float() , x=torch.from_numpy(np.stack(snake_case , axis=0 ) ).float() , y=torch.from_numpy(np.stack(snake_case , axis=0 ) ).float() , z=torch.from_numpy(np.stack(snake_case , axis=0 ) ).float() , width=snake_case , height=snake_case , x_fov=0.7 , y_fov=0.7 , shape=(1, len(snake_case )) , )
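Reading the code above: `camera_rays` packs an (origin, unit direction) pair for every pixel of every pose. A usage sketch:

cameras = create_pan_cameras(64)  # 20 poses panning around the origin
rays = cameras.camera_rays
print(rays.shape)  # torch.Size([1, 81920, 2, 3]) -- 20 * 64 * 64 pixels, origin plus direction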
| 719 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class a__ ( UpperCamelCase_ ):
snake_case__ = '''roc_bert'''
def __init__( self : Union[str, Any] ,a__ : Union[str, Any]=3_0522 ,a__ : List[Any]=768 ,a__ : Tuple=12 ,a__ : Optional[int]=12 ,a__ : Any=3072 ,a__ : Optional[int]="gelu" ,a__ : Union[str, Any]=0.1 ,a__ : List[str]=0.1 ,a__ : Dict=512 ,a__ : int=2 ,a__ : Dict=0.02 ,a__ : Dict=1E-12 ,a__ : int=True ,a__ : Optional[int]=0 ,a__ : Union[str, Any]="absolute" ,a__ : List[Any]=None ,a__ : str=True ,a__ : str=True ,a__ : List[str]=768 ,a__ : Optional[int]=910 ,a__ : Any=512 ,a__ : str=2_4858 ,a__ : List[str]=True ,**a__ : str ,) -> List[str]:
"""simple docstring"""
_lowerCAmelCase:Tuple = vocab_size
_lowerCAmelCase:Any = max_position_embeddings
_lowerCAmelCase:Union[str, Any] = hidden_size
_lowerCAmelCase:Optional[Any] = num_hidden_layers
_lowerCAmelCase:int = num_attention_heads
_lowerCAmelCase:int = intermediate_size
_lowerCAmelCase:Union[str, Any] = hidden_act
_lowerCAmelCase:Any = hidden_dropout_prob
_lowerCAmelCase:List[Any] = attention_probs_dropout_prob
_lowerCAmelCase:List[Any] = initializer_range
_lowerCAmelCase:Dict = type_vocab_size
_lowerCAmelCase:Dict = layer_norm_eps
_lowerCAmelCase:str = use_cache
_lowerCAmelCase:Any = enable_pronunciation
_lowerCAmelCase:List[str] = enable_shape
_lowerCAmelCase:Optional[int] = pronunciation_embed_dim
_lowerCAmelCase:Union[str, Any] = pronunciation_vocab_size
_lowerCAmelCase:str = shape_embed_dim
_lowerCAmelCase:List[Any] = shape_vocab_size
_lowerCAmelCase:str = concat_input
_lowerCAmelCase:Optional[int] = position_embedding_type
_lowerCAmelCase:Any = classifier_dropout
super().__init__(pad_token_id=a__ ,**a__)
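A minimal instantiation sketch for the config above (the defaults mirror `weiweishi/roc-bert-base-zh`):

from transformers import RoCBertConfig

config = RoCBertConfig()
print(config.model_type)                # 'roc_bert'
print(config.pronunciation_vocab_size)  # 910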
| 439 | 0 |
import unittest

import numpy as np

from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor


class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

| 397 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
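The `_LazyModule` pattern above defers the heavy torch-backed import until a symbol is first touched; a sketch of the observable behavior (the module path is an assumption — before the model was moved under `deprecated` it lived at `transformers.models.van`):

from transformers.models.deprecated.van import VanConfig  # cheap: only configuration_van loads
from transformers.models.deprecated.van import VanModel   # first access triggers the torch-backed import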
| 477 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase__ = StableDiffusionSAGPipeline
UpperCAmelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ = False
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase_ : List[Any] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
UpperCamelCase_ : Union[str, Any] =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
UpperCamelCase_ : List[str] =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCamelCase_ : Optional[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
UpperCamelCase_ : Optional[Any] =CLIPTextModel(_lowerCamelCase )
UpperCamelCase_ : Optional[int] =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ : str ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase_ ( self :Any , _lowerCamelCase :Optional[int] , _lowerCamelCase :Any=0 ):
'''simple docstring'''
if str(_lowerCamelCase ).startswith('mps' ):
UpperCamelCase_ : Union[str, Any] =torch.manual_seed(_lowerCamelCase )
else:
UpperCamelCase_ : Dict =torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
UpperCamelCase_ : Optional[Any] ={
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : int =StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
UpperCamelCase_ : Tuple =sag_pipe.to(_lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase_ : Any ='.'
UpperCamelCase_ : Optional[Any] =torch.manual_seed(0 )
UpperCamelCase_ : List[Any] =sag_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase_ : Optional[int] =output.images
UpperCamelCase_ : List[Any] =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ : Optional[int] =np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase_ : Tuple =sag_pipe.to(_lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase_ : List[Any] ='.'
UpperCamelCase_ : List[str] =torch.manual_seed(0 )
UpperCamelCase_ : Optional[int] =sag_pipe(
[prompt] , generator=_lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
UpperCamelCase_ : str =output.images
UpperCamelCase_ : Any =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase_ : List[str] =np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
UpperCamelCase_ : Dict =StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
UpperCamelCase_ : str =sag_pipe.to(_lowerCamelCase )
sag_pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase_ : Tuple ='.'
UpperCamelCase_ : List[Any] =torch.manual_seed(0 )
UpperCamelCase_ : List[Any] =sag_pipe(
[prompt] , width=768 , height=512 , generator=_lowerCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
UpperCamelCase_ : Dict =output.images
assert image.shape == (1, 512, 768, 3)
| 706 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def A_ ( __lowercase ):
monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings' , set() )
@pytest.fixture
def A_ ( __lowercase ):
class a__ :
def __init__( self :int , _lowerCamelCase :Any ):
'''simple docstring'''
UpperCamelCase_ : str =metric_id
class a__ :
UpperCAmelCase__ = [MetricMock(A__ ) for metric_id in ['''accuracy''', '''mse''', '''precision''', '''codeparrot/apps_metric''']]
def lowerCamelCase_ ( self :Optional[int] ):
'''simple docstring'''
return self._metrics
monkeypatch.setattr('datasets.inspect.huggingface_hub' , HfhMock() )
@pytest.mark.parametrize(
'func, args' , [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))] )
def A_ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ):
if "tmp_path" in args:
UpperCamelCase_ : List[Any] =tuple(arg if arg != 'tmp_path' else tmp_path for arg in args )
with pytest.warns(__lowercase , match='https://huggingface.co/docs/evaluate' ):
func(*__lowercase )
| 395 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , a ):
"""simple docstring"""
snake_case_ :str = value
snake_case_ :Node | None = None
snake_case_ :Node | None = None
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , a ):
"""simple docstring"""
snake_case_ :str = tree
def _a ( self , a ):
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ):
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
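A usage example: summing the three-node tree below yields 10 + 5 + (-3) = 12.

root = Node(10)
root.left = Node(5)
root.right = Node(-3)
print(next(iter(BinaryTreeNodeSum(root))))  # 12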
| 584 |
"""simple docstring"""
def A ( _A, _A ):
"""simple docstring"""
return x if y == 0 else greatest_common_divisor(_A, x % y )
def A ( _A, _A ):
"""simple docstring"""
return (x * y) // greatest_common_divisor(_A, _A )
def A ( _A = 20 ):
"""simple docstring"""
snake_case_ :Dict = 1
for i in range(1, n + 1 ):
snake_case_ :Dict = lcm(_A, _A )
return g
if __name__ == "__main__":
print(F'''{solution() = }''')
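As a quick check, `solution(10)` must return 2520 — the smallest positive number evenly divisible by all of 1..10, i.e. 2^3 * 3^2 * 5 * 7:

assert solution(10) == 2520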
| 584 | 1 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
__SCREAMING_SNAKE_CASE : List[str] = 5_0_0_0_0_0
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.split(__file__)
__SCREAMING_SNAKE_CASE : Any = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowerCAmelCase_( lowercase_ : datasets.Dataset , **lowercase_ : Union[str, Any] ) -> List[str]:
_lowerCamelCase = dataset.map(**lowercase_ )
@get_duration
def lowerCAmelCase_( lowercase_ : datasets.Dataset , **lowercase_ : Optional[int] ) -> Any:
_lowerCamelCase = dataset.filter(**lowercase_ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = {'''num examples''': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase = datasets.Features({'''text''': datasets.Value('''string''' ), '''numbers''': datasets.Value('''float32''' )} )
_lowerCamelCase = generate_example_dataset(
os.path.join(lowercase_ , '''dataset.arrow''' ) , lowercase_ , num_examples=lowercase_ )
_lowerCamelCase = transformers.AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=lowercase_ )
def tokenize(lowercase_ : Dict ):
return tokenizer(examples['''text'''] )
_lowerCamelCase = map(lowercase_ )
_lowerCamelCase = map(lowercase_ , batched=lowercase_ )
_lowerCamelCase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type='''numpy''' ):
_lowerCamelCase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type='''pandas''' ):
_lowerCamelCase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type='''torch''' , columns='''numbers''' ):
_lowerCamelCase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
with dataset.formatted_as(type='''tensorflow''' , columns='''numbers''' ):
_lowerCamelCase = map(lowercase_ , function=lambda lowercase_ : None , batched=lowercase_ )
_lowerCamelCase = map(lowercase_ , function=lowercase_ , batched=lowercase_ )
_lowerCamelCase = filter(lowercase_ )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase_ , '''wb''' ) as f:
f.write(json.dumps(lowercase_ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 623 |
"""simple docstring"""
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be abbreviated to string `b`."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
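    # Worked example (added; `abbr` is the name restored above): "daBcd" can be
    # abbreviated to "ABC" by capitalizing 'a' and 'c' and deleting the
    # lowercase 'd's, while "dBcd" cannot produce the leading 'A'.
    assert abbr("daBcd", "ABC") is True
    assert abbr("dBcd", "ABC") is False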
| 623 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
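# Usage note (added): thanks to the `_LazyModule` indirection above,
# `from transformers import MegaConfig` stays cheap at import time, and the
# torch-backed classes in `modeling_mega` are only imported on first access.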
| 348 |
| 348 | 1 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        'Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will '
        'be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.',
        FutureWarning,
    )
| 0 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seqaseq_no_dist(self):
        self.run_seqaseq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seqaseq_dp(self):
        self.run_seqaseq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seqaseq_ddp(self):
        self.run_seqaseq_quick(distributed=True)
    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seqaseq_fully_sharded_ddp_fp16(self):
        self.run_seqaseq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )
    @require_apex
    @require_torch_gpu
    def test_run_seqaseq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seqaseq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seqaseq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seqaseq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,  # force run in a new gpu-memory-clean process
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,  # to allow deterministic fixed memory usage
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )
        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )
        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
| 0 | 1 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)

_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]

_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]

_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"""operations""" ,(
pytest.param(_add_items ,id="""add items""" ),
pytest.param(_overwrite_items ,id="""overwrite items""" ),
pytest.param(_delete_items ,id="""delete items""" ),
pytest.param(_access_absent_items ,id="""access absent items""" ),
pytest.param(_add_with_resize_up ,id="""add with resize up""" ),
pytest.param(_add_with_resize_down ,id="""add with resize down""" ),
) ,)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
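# Note (added): `initial_block_size=4` above is deliberately tiny so that the
# resize-up/resize-down operation lists actually force HashMap to rehash.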
| 593 |
"""simple docstring"""
import math
from datetime import datetime, timedelta
def gauss_easter(year):
    """Calculation of the Easter date for a given year, following Gauss's method."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
        tense = "will be" if year > datetime.now().year else "was"
print(F"Easter in {year} {tense} {gauss_easter(year)}")
| 644 | 0 |
"""simple docstring"""
def solution() -> int:
    """Product a*b*c of the Pythagorean triplet for which a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]
][0]
if __name__ == "__main__":
print(F'''{solution() = }''')
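    # Known result (added): the unique triplet is (200, 375, 425), so the
    # product is 31875000.
    assert solution() == 31_875_000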
| 716 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states (the Viterbi path)."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
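    # Worked example (added for illustration; names follow the reconstruction
    # above): the classic two-state "healthy/sick" HMM, whose Viterbi path for
    # (normal, cold, dizzy) is healthy -> healthy -> sick.
    observations = ["normal", "cold", "dizzy"]
    states = ["healthy", "sick"]
    start_p = {"healthy": 0.6, "sick": 0.4}
    trans_p = {
        "healthy": {"healthy": 0.7, "sick": 0.3},
        "sick": {"healthy": 0.4, "sick": 0.6},
    }
    emit_p = {
        "healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    assert viterbi(observations, states, start_p, trans_p, emit_p) == ["healthy", "healthy", "sick"]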
| 117 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
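    # Sanity check (added for illustration): a blinker has period 2, so two
    # generations reproduce the original pattern.
    assert new_generation(new_generation(BLINKER)) == BLINKER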
| 109 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(
        self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> TFBaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> TFBaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = R"\n    Parameters:\n    This model is a Tensorflow\n    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n    behavior.\n    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"

REGNET_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConveNextImageProcessor.__call__`] for details.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
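# Usage sketch (added for illustration; the checkpoint id is taken from the
# docstring constants above, the rest is the standard transformers API):
#
#     from transformers import AutoImageProcessor, TFRegNetForImageClassification
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="tf")  # `image` is any PIL image
#     logits = model(**inputs).logits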
| 48 | 0 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
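# Usage sketch (added for illustration): this builder is what backs the
# packaged "parquet" loader in the standard `datasets` API. The file path
# below is hypothetical.
#
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={"train": "train.parquet"})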
| 495 |
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 495 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token='[CLS]',
                 eos_token='[SEP]', unk_token='<unk>', sep_token='[SEP]', pad_token='<pad>', cls_token='[CLS]',
                 mask_token='[MASK]', sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token,
            eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) in a single string."""
        current_sub_tokens = []
        out_string = ''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += ' '
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0, token_ids_1=None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the sentencepiece vocabulary to `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
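# A minimal, self-contained sketch (ids are made up, not part of the tokenizer
# above) showing how build_inputs_with_special_tokens and get_special_tokens_mask
# stay aligned for a single sequence:
cls_id, sep_id = 0, 1
token_ids = [5, 6, 7]
with_special = [cls_id] + token_ids + [sep_id]   # [CLS] x x x [SEP]
special_mask = [1] + [0] * len(token_ids) + [1]  # 1 marks the special positions
assert len(with_special) == len(special_mask)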
| 485 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
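# A quick sanity check of parse_flag_from_env (a sketch; the env var name is
# arbitrary and removed again afterwards):
os.environ["DEMO_FLAG"] = "yes"
assert parse_flag_from_env("DEMO_FLAG") == 1  # strtobool maps "yes" -> 1
del os.environ["DEMO_FLAG"]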
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
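# Illustrative stacking of the decorators above on a hypothetical test function
# (demo only; real tests would live in a test module):
@slow
@require_faiss
def _demo_decorated_test():
    pass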
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
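# A short usage sketch of the offline() context manager above (hypothetical
# helper, not part of the original module): any HTTP call made inside the
# block fails fast instead of hitting the network.
def _demo_offline_usage():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        try:
            requests.get("https://huggingface.co", timeout=1)
        except requests.ConnectionError:
            return True
    return False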
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result
def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker ("gw0" -> 0)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Derive a unique port per pytest-xdist worker to avoid collisions."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
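# A small sanity sketch of the port helper above: with no xdist worker env var
# set, the worker id falls back to "gw0" -> 0, so each worker gets base + id.
assert get_torch_dist_unique_port() == 29500 + pytest_xdist_worker_id()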
| 431 | 0 |
'''simple docstring'''
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a grayscale PIL image around its mean pixel value."""
    width, height = image.size  # PIL's size is (width, height)
    mean = 0
    pixels = image.load()
    for i in range(height):
        for j in range(width):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[j, i] = 255 if pixels[j, i] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
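# Worked example of the thresholding rule (a sketch with made-up values):
# for pixels [10, 200, 90] the integer mean is 100, so only the pixel strictly
# above the mean becomes white.
vals = [10, 200, 90]
mean = sum(vals) // len(vals)
assert [255 if v > mean else 0 for v in vals] == [0, 255, 0]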
| 708 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
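# Example invocation (illustrative only; the script name, task name and data
# path below are assumptions, the flags come from the dataclasses above and
# from TrainingArguments):
# python run_multiple_choice.py \
#   --model_name_or_path bert-base-uncased \
#   --task_name swag --data_dir ./data \
#   --max_seq_length 128 --output_dir ./out \
#   --do_train --do_eval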
| 431 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    """Configuration class for the SEW model."""

    model_type = "sew"
    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, squeeze_factor=2, hidden_act="gelu",
        hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0,
        ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256,
        pad_token_id=0, bos_token_id=1, eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self) -> int:
        return functools.reduce(operator.mul, self.conv_stride, 1)
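# A quick usage sketch of the config above (demo only, defaults assumed):
# inputs_to_logits_ratio is the product of the conv strides, i.e. how many raw
# audio samples collapse into a single encoder frame.
_demo_config = SEWConfig()
assert _demo_config.inputs_to_logits_ratio == 320  # 5 * 2**6 over the default strides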
| 120 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """Configuration class for the SegFormer model."""

    model_type = "segformer"
    def __init__(
        self,
        num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu",
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, classifier_dropout_prob=0.1,
        initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6,
        decoder_hidden_size=256, semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
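# A short usage sketch of the two classes above (demo only): the ONNX config
# declares a single dynamic-axis input named "pixel_values".
_demo_onnx = SegformerOnnxConfig(SegformerConfig())
assert "pixel_values" in _demo_onnx.inputs
assert _demo_onnx.default_onnx_opset == 12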
| 120 | 1 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="np",
        )
        text_inputs = text_inputs["input_ids"]

        inputs["prompt_embeds"] = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt_embeds(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        inputs = self.get_dummy_inputs()
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = pipe.tokenizer(
                p,
                padding="max_length",
                max_length=pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="np",
            )
            text_inputs = text_inputs["input_ids"]
            embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0])

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = pipe(**inputs)
        image_slice_2 = output.images[0, -3:, -3:, -1]

        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6
    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
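# The assertions above all follow one pattern: take a 3x3 corner slice of the
# generated image and compare it to a stored reference. A tiny self-contained
# sketch of that comparison (values here are made up):
_demo_image = np.zeros((1, 128, 128, 3))
_demo_slice = _demo_image[0, -3:, -3:, -1]
assert np.abs(_demo_slice.flatten() - np.zeros(9)).max() < 1e-2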
| 717 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

set_seed(770)

new_layer_name_dict = {
    "c_attn": "att_proj",
    "c_proj": "out_proj",
    "c_fc": "in_proj",
    "transformer.": "",
    "h.": "layers.",
    "ln_1": "layernorm_1",
    "ln_2": "layernorm_2",
    "ln_f": "layernorm_final",
    "wpe": "position_embeds_layer",
    "wte": "input_embeds_layer",
}

REMOTE_MODEL_PATHS = {
    "text_small": {
        "repo_id": "suno/bark",
        "file_name": "text.pt",
    },
    "coarse_small": {
        "repo_id": "suno/bark",
        "file_name": "coarse.pt",
    },
    "fine_small": {
        "repo_id": "suno/bark",
        "file_name": "fine.pt",
    },
    "text": {
        "repo_id": "suno/bark",
        "file_name": "text_2.pt",
    },
    "coarse": {
        "repo_id": "suno/bark",
        "file_name": "coarse_2.pt",
    },
    "fine": {
        "repo_id": "suno/bark",
        "file_name": "fine_2.pt",
    },
}

CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_hub, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_hub, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])
            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict
    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(semantic_path, coarse_path, fine_path, append_text, hub_path, folder_path):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
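# Example invocation of this conversion script (the script filename and output
# path are illustrative assumptions; the flags come from the argparse setup above):
# python convert_suno_to_hf.py text ./bark-text-hf --is_small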
| 328 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""Wrap multiple `ControlNetModel`s so they can be run and saved as one model."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
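# A sketch of the directory naming scheme implemented above (paths illustrative):
# saving three controlnets to "./multi" produces ./multi, ./multi_1, ./multi_2,
# and from_pretrained walks those directories in order until one is missing.
# MultiControlNetModel([cn_a, cn_b, cn_c]).save_pretrained("./multi")
# loaded = MultiControlNetModel.from_pretrained("./multi")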
| 94 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16,
        hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4],
        key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 219 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        relative_attention=False, position_biased_input=True, pos_att_type="None",
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
_snake_case: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case: Dict = None
if self.use_input_mask:
_snake_case: int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case: int = None
if self.use_token_type_ids:
_snake_case: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case: Optional[int] = None
_snake_case: List[str] = None
_snake_case: int = None
if self.use_labels:
_snake_case: int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case: int = ids_tensor([self.batch_size] , self.num_choices )
_snake_case: Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # the five flags below are reconstructed from the True/False pattern in the original
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 273 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    # different_colour_ways_number[n][t - 2] counts rows of length n that use at
    # least one coloured tile of length t (t in {2, 3, 4}); colours are summed at the end.
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
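

# Added cross-check (a minimal sketch, not part of the original solution): count
# the same tilings directly.  For each coloured tile length t in {2, 3, 4}, count
# rows built from grey unit squares and tiles of length t, then subtract the
# single all-grey row so that at least one coloured tile is used.  Exponential
# recursion, so only practical for small lengths; the helper name
# `brute_force_solution` is ours, not the original author's.
def brute_force_solution(length: int) -> int:
    def ways(remaining: int, tile_length: int) -> int:
        if remaining < 0:
            return 0
        if remaining == 0:
            return 1
        # a row either starts with a grey unit square or with one coloured tile
        return ways(remaining - 1, tile_length) + ways(remaining - tile_length, tile_length)

    return sum(ways(length, tile_length) - 1 for tile_length in (2, 3, 4))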
if __name__ == "__main__":
print(F'{solution() = }')
| 273 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
        "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
    ),
    # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
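

# Usage sketch (illustrative, assuming the reconstructed names above): the
# attribute_map lets the generic `hidden_size` alias resolve to `n_embd`.
#
#   config = TrajectoryTransformerConfig(n_embd=256)
#   assert config.hidden_size == 256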
| 587 |
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # The three overrides below disable mixin tests that do not apply to this
    # tokenizer; the exact original method names are assumed.
    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
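

# Quick manual check (illustrative; requires the `rjieba` backend installed):
#
#   tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
#   print(tokenizer.tokenize("今天天气非常好"))  # word-segmented Chinese tokens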
| 587 | 1 |
"""simple docstring"""
def snake_case_to_camel_pascal_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")
    # camelCase keeps the first word untouched; PascalCase capitalizes it too
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 545 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase__ : Tuple = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : str = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 545 | 1 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " "):
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages of roughly 100 words."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args):
logger.info("Step 1 - Create the dataset" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("Step 2 - Index the dataset" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args) | 327 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
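

# Usage sketch (illustrative, assuming the reconstructed class names above):
#
#   config = GitConfig()                      # builds a default GitVisionConfig too
#   vision_dict = config.to_dict()["vision_config"]
#   assert vision_dict["hidden_size"] == 768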
| 672 | 0 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True
    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()
    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # utterance-level cepstral mean and variance normalization
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
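

# Usage sketch (illustrative; assumes a 16 kHz mono waveform):
#
#   extractor = Speech2TextFeatureExtractor()
#   speech = np.zeros(16_000, dtype=np.float32)  # one second of silence
#   inputs = extractor(speech, sampling_rate=16_000, return_tensors="np")
#   # inputs["input_features"] has shape (batch, frames, num_mel_bins)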
| 218 |
def solution(max_perimeter: int = 10**9) -> int:
    # Walk a linear recurrence whose terms generate the admissible triangle
    # perimeters in increasing order, accumulating every perimeter that does
    # not exceed the limit.  (Variable names reconstructed from the loop body.)
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F'{solution() = }')
| 218 | 1 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowercase__, lowercase__, lowercase__ = False, False, False  # module-level availability flags (original names lost)
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__( self):
'''simple docstring'''
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # If you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already have PCM bytes, we don't have to read the file again (just use them!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")

        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err

        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)

        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
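

# Usage sketch (illustrative):
#
#   feature = Audio(sampling_rate=16_000)
#   encoded = feature.encode_example({"array": [0.0] * 16_000, "sampling_rate": 16_000})
#   decoded = feature.decode_example(encoded)
#   # decoded == {"path": None, "array": <np.ndarray>, "sampling_rate": 16000}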
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            image=low_res_image,
            generator=generator,
            guidance_scale=6.0,
            noise_level=20,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def _UpperCamelCase (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
A__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
A__ = self.dummy_cond_unet_upscale
A__ = DDPMScheduler()
A__ = DDIMScheduler(prediction_type='v_prediction' )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
A__ = StableDiffusionUpscalePipeline(
unet=a , low_res_scheduler=a , scheduler=a , vae=a , text_encoder=a , tokenizer=a , max_noise_level=3_50 , )
A__ = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
A__ = 'A painting of a squirrel eating a burger'
A__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
A__ = output.images
assert image.shape[0] == 2
A__ = torch.Generator(device=a ).manual_seed(0 )
A__ = sd_pipe(
[prompt] , image=a , generator=a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
A__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _UpperCamelCase (self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.dummy_cond_unet_upscale
A__ = DDPMScheduler()
A__ = DDIMScheduler(prediction_type='v_prediction' )
A__ = self.dummy_vae
A__ = self.dummy_text_encoder
A__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
A__ = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
A__ = unet.half()
A__ = text_encoder.half()
# make sure here that pndm scheduler skips prk
A__ = StableDiffusionUpscalePipeline(
unet=a , low_res_scheduler=a , scheduler=a , vae=a , text_encoder=a , tokenizer=a , max_noise_level=3_50 , )
A__ = sd_pipe.to(a )
sd_pipe.set_progress_bar_config(disable=a )
A__ = 'A painting of a squirrel eating a burger'
A__ = torch.manual_seed(0 )
A__ = sd_pipe(
[prompt] , image=a , generator=a , num_inference_steps=2 , output_type='np' , ).images
A__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _UpperCamelCase (self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
A__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
A__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
A__ = 'stabilityai/stable-diffusion-x4-upscaler'
A__ = StableDiffusionUpscalePipeline.from_pretrained(a )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
A__ = 'a cat sitting on a park bench'
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=a , image=a , generator=a , output_type='np' , )
A__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _UpperCamelCase (self : Any ) -> str:
"""simple docstring"""
A__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
A__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
A__ = 'stabilityai/stable-diffusion-x4-upscaler'
A__ = StableDiffusionUpscalePipeline.from_pretrained(
a , torch_dtype=torch.floataa , )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
A__ = 'a cat sitting on a park bench'
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=a , image=a , generator=a , output_type='np' , )
A__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _UpperCamelCase (self : Dict ) -> List[str]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
A__ = 'stabilityai/stable-diffusion-x4-upscaler'
A__ = StableDiffusionUpscalePipeline.from_pretrained(
a , torch_dtype=torch.floataa , )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A__ = 'a cat sitting on a park bench'
A__ = torch.manual_seed(0 )
A__ = pipe(
prompt=a , image=a , generator=a , num_inference_steps=5 , output_type='np' , )
A__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 531 | 0 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_ : Union[str, Any] = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."""
    )
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="wikitext",
        help="Name of the training. Explore datasets at: hf.co/datasets.",
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size",
        type=int,
        default=1000,
        help="Number of entries to go in a single shard.",
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit",
        default=None,
        type=int,
        help="Limit the number of shards (used for debugging).",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
A__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(UpperCAmelCase_ ):
os.makedirs(UpperCAmelCase_ )
else:
A__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
A__ = tokenize_function(UpperCAmelCase_ )
A__ = dataset.map(UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parse_args()
main(args)
| 500 |
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    """Time both generators over a range of input sizes."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
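# Quick sanity check (illustrative, easy to verify by hand): both generators agree
# on the first five rows of the triangle.
#
#   generate_pascal_triangle_optimized(5)
#   -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]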
| 500 | 1 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec view over the files of a Hub dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"trust_env": True}).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out) | 436 |
'''simple docstring'''
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform mapping the three points `pts1` onto `pts2`."""
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    # (the pairing of point sets in the calls below is a plausible reconstruction;
    # the original arguments were elided in this excerpt)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, rotated_image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(rotated_image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )
class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 715 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 78 | 0 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CUAD(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 103 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
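# Example (illustrative): checking a single pinned dependency on demand.
#
#   dep_version_check("numpy")  # raises if the installed version does not
#                               # satisfy the pin recorded in `deps`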
| 380 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 452 | def print_pascal_triangle(num_rows: int) -> None:
    """Pretty-print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 452 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
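# Illustrative note: with the lazy module installed in sys.modules, importing this
# package stays cheap; the torch-backed symbols listed in `_import_structure` are
# only resolved on first attribute access, e.g.:
#
#   from transformers.models.informer import InformerConfig  # resolved lazily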
| 285 |
def jaro_winkler(str1: str, str2: str) -> float:
    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
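    # Classic textbook check (illustrative): "martha" vs "marhta" has 6 matches,
    # 1 transposition, and a common prefix of length 3, giving jaro ~ 0.9444 and
    # jaro_winkler ~ 0.9611.
    print(jaro_winkler("martha", "marhta"))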
| 285 | 1 |
def solution() -> int:
    """Product of the digits d1 * d10 * d100 * ... * d1000000 of the Champernowne constant."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
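    # For reference, the digits at positions 1, 10, 100, ..., 1_000_000 of the
    # Champernowne constant are 1, 1, 5, 3, 7, 2, 1, so solution() returns 210.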
| 199 |
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
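# Why this works (illustrative): for prime p, Fermat's little theorem gives
# b**(p-1) == 1 (mod p), so b**(p-2) is the modular inverse of b. For example,
# binary_exponentiation(10, 699, 701) * 10 % 701 == 1.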
| 199 | 1 |
g = 9.80665  # standard gravity, m/s^2


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force on a fully immersed body: F = fluid_density * gravity * volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
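    # Worked example (illustrative): half a cubic metre of water (1000 kg/m^3)
    # produces a buoyant force of 1000 * 9.80665 * 0.5 = 4903.325 N.
    print(archimedes_principle(fluid_density=1000, volume=0.5))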
| 0 |
import string
def atbash_slow(sequence: str) -> str:
    """Map each letter to its mirror in the alphabet using ord()/chr() arithmetic."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """Same cipher via a lookup into the reversed alphabet."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)


def benchmark() -> None:
    """Time both implementations over string.printable."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
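    # Quick check (illustrative): Atbash maps A<->Z, B<->Y, ... so
    # atbash("ABCDEFGH") == "ZYXWVUTS", and applying the cipher twice
    # returns the original text (it is its own inverse).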
| 411 | 0 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_tiny(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}'):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f'Something is going wrong in the regression problem: intercepted {w.message}')

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 608 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate an n-qubit quantum Fourier transform circuit."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # controlled phase rotation between qubit j and the current target
            # (argument order is a plausible reconstruction; the originals were elided)
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
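    # Interpretation note (illustrative): the register starts in |000>, whose QFT
    # is a uniform superposition, so the 10000 shots should be spread roughly
    # evenly over all 2**3 = 8 measurement outcomes.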
| 608 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the variance-exploding SDE sampler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample_mean = sample_mean.clamp(0, 1)
        sample = sample_mean.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
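# Usage sketch (illustrative; the checkpoint name is an assumption, and any
# score-SDE-VE checkpoint with a matching UNet/scheduler pair would do):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]
#   image.save("sample.png")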
| 301 |
def join(separator: str, separated: list[str]) -> str:
    """Concatenate the strings in `separated` with `separator` between them."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
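    # Example (illustrative): the trailing separator is stripped, so
    # join("-", ["a", "b", "c"]) == "a-b-c"
    print(join("-", ["a", "b", "c"]))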
| 146 | 0 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ])
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 704 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        r"""Find instances where a non-binary file is opened without an explicit encoding."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Find instances where a dataset script contains a print statement."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
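# Quick illustrative self-check of the encoding regex used above (not part of
# the original test module): the pattern flags `open(path)` calls that pass
# neither an encoding nor a binary/write mode.
if __name__ == "__main__":
    _enc_pattern = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert _enc_pattern.search(" open('data.txt')") is not None  # flagged: no encoding given
    assert _enc_pattern.search(" open('data.txt', encoding='utf-8')") is None  # accepted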
| 422 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
if __name__ == "__main__":
main()
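# Typical invocation (illustrative; the flag names below are assumptions based
# on TensorFlowBenchmarkArguments and may differ between versions):
#
#     python run_benchmark_tf.py --models bert-base-uncased --batch_sizes 8 --sequence_lengths 128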
| 10 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class _A ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any] ) -> Any:
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if config is None:
assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f''' {self.model.__class__}'''
)
__UpperCAmelCase =self.model.config
else:
__UpperCAmelCase =config
__UpperCAmelCase =data_args
__UpperCAmelCase =self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'''The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'''
""" padding..""" )
if self.args.label_smoothing == 0:
__UpperCAmelCase =torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
__UpperCAmelCase =label_smoothed_nll_loss
def _a ( self : Any , __SCREAMING_SNAKE_CASE : int ) -> Any:
if self.optimizer is None:
__UpperCAmelCase =["""bias""", """LayerNorm.weight"""]
__UpperCAmelCase =[
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
__UpperCAmelCase =Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
__UpperCAmelCase =Adafactor
__UpperCAmelCase ={"""scale_parameter""": False, """relative_step""": False}
else:
__UpperCAmelCase =AdamW
__UpperCAmelCase ={
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
__UpperCAmelCase =self.args.learning_rate
if self.sharded_ddp:
__UpperCAmelCase =OSS(
params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
__UpperCAmelCase =optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.lr_scheduler is None:
__UpperCAmelCase =self._get_lr_scheduler(__SCREAMING_SNAKE_CASE )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any:
__UpperCAmelCase =arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
__UpperCAmelCase =schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
__UpperCAmelCase =schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
__UpperCAmelCase =schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE )
return scheduler
def _a ( self : Optional[Any] ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> Tuple:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
__UpperCAmelCase =self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
__UpperCAmelCase , __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2]
else:
# compute label smoothed loss
__UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
__UpperCAmelCase =torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 )
__UpperCAmelCase , __UpperCAmelCase =self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
__UpperCAmelCase =inputs.pop("""labels""" )
__UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return loss
def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, Union[torch.Tensor, Any]] , __SCREAMING_SNAKE_CASE : bool , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
__UpperCAmelCase =self._prepare_inputs(__SCREAMING_SNAKE_CASE )
__UpperCAmelCase ={
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
__UpperCAmelCase =self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
__UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
__UpperCAmelCase =inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
__UpperCAmelCase , __UpperCAmelCase =self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__UpperCAmelCase =loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
__UpperCAmelCase =generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
__UpperCAmelCase =self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> List[Any]:
# If PAD token is not defined at least EOS token has to be defined
__UpperCAmelCase =self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f''' padded to `max_length`={max_length}''' )
__UpperCAmelCase =pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
__UpperCAmelCase =tensor
return padded_tensor
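# For reference (a sketch, not part of this module): the dynamically imported
# `label_smoothed_nll_loss` helper typically looks like the fairseq-style
# implementation below; the exact signature in the example's `utils` module
# may differ.
#
#     def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
#         if target.dim() == lprobs.dim() - 1:
#             target = target.unsqueeze(-1)
#         nll_loss = -lprobs.gather(dim=-1, index=target)
#         smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
#         pad_mask = target.eq(ignore_index)
#         nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
#         smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
#         eps_i = epsilon / lprobs.size(-1)
#         loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
#         return loss, nll_loss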
| 68 | 0 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class __UpperCAmelCase :
__A : Union[str, Any] = MBartConfig
__A : Optional[Any] = {}
__A : int = 'gelu'
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=20 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=0 , ):
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = eos_token_id
lowerCAmelCase_ = pad_token_id
lowerCAmelCase_ = bos_token_id
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCAmelCase_ = prepare_mbart_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
return config, inputs_dict
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
lowerCAmelCase_ = TFMBartModel(config=_lowerCamelCase ).get_decoder()
lowerCAmelCase_ = inputs_dict['''input_ids''']
lowerCAmelCase_ = input_ids[:1, :]
lowerCAmelCase_ = inputs_dict['''attention_mask'''][:1, :]
lowerCAmelCase_ = inputs_dict['''head_mask''']
lowerCAmelCase_ = 1
# first forward pass
lowerCAmelCase_ = model(_lowerCamelCase , attention_mask=_lowerCamelCase , head_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
lowerCAmelCase_ ,lowerCAmelCase_ = outputs.to_tuple()
lowerCAmelCase_ = past_key_values[1]
def snake_case_ ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Optional[Any]=None , __snake_case : str=None , __snake_case : Optional[Any]=None , __snake_case : int=None , __snake_case : Any=None , ) -> Union[str, Any]:
if attention_mask is None:
lowerCAmelCase_ = tf.cast(tf.math.not_equal(__snake_case , config.pad_token_id) , tf.inta)
if decoder_attention_mask is None:
lowerCAmelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta),
] , axis=-1 , )
if head_mask is None:
lowerCAmelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
lowerCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
lowerCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class __UpperCAmelCase ( __a , __a , unittest.TestCase ):
__A : Optional[int] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
__A : Union[str, Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
__A : Any = (
{
'conversational': TFMBartForConditionalGeneration,
'feature-extraction': TFMBartModel,
'summarization': TFMBartForConditionalGeneration,
'text2text-generation': TFMBartForConditionalGeneration,
'translation': TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A : Optional[Any] = True
__A : Dict = False
__A : str = False
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = TFMBartModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_lowerCamelCase )
def UpperCAmelCase_ ( self ):
self.config_tester.run_common_tests()
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class __UpperCAmelCase ( unittest.TestCase ):
__A : Tuple = [
' UN Chief Says There Is No Military Solution in Syria',
]
__A : str = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
]
__A : Dict = 'facebook/mbart-large-en-ro'
@cached_property
def UpperCAmelCase_ ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCAmelCase_ ( self , **_lowerCamelCase ):
lowerCAmelCase_ = self.translate_src_text(**_lowerCamelCase )
self.assertListEqual(self.expected_text , _lowerCamelCase )
def UpperCAmelCase_ ( self , **_lowerCamelCase ):
lowerCAmelCase_ = self.tokenizer(self.src_text , **_lowerCamelCase , return_tensors='''tf''' )
lowerCAmelCase_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
lowerCAmelCase_ = self.tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
return generated_words
@slow
def UpperCAmelCase_ ( self ):
self._assert_generated_batch_equal_expected()
| 709 | '''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict[int, list[int]]) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v: int, c: int) -> None:
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # color every connected component, alternating colors 0 and 1
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # the graph is bipartite iff no edge joins two vertices of the same color
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
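# Extra illustrative check (not in the original snippet): a triangle contains
# an odd cycle, so it is not 2-colorable and the check must return False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False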
| 606 | 0 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Find the d <= `digit` for which numerator/d has the longest recurring
    decimal cycle, by tracking remainders of the long division until one repeats."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
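# Worked example (illustrative, not from the original file): 1/7 = 0.(142857)
# has a 6-digit recurring cycle, the longest for any d < 10, so
# solution(1, 10) returns 7.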
| 614 | '''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( lowerCAmelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Any = KandinskyInpaintPipeline
UpperCAmelCase_ : str = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
UpperCAmelCase_ : Union[str, Any] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
UpperCAmelCase_ : Union[str, Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase_ : Optional[int] = False
@property
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
return 100
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Any = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1005 ,)
UpperCAmelCase__ : Any = MultilingualCLIP(lowerCamelCase_ )
UpperCAmelCase__ : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCAmelCase__ : List[str] = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.dummy_text_encoder
UpperCAmelCase__ : Optional[int] = self.dummy_tokenizer
UpperCAmelCase__ : Optional[Any] = self.dummy_unet
UpperCAmelCase__ : Any = self.dummy_movq
UpperCAmelCase__ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule='''linear''' ,beta_start=0.0_0085 ,beta_end=0.012 ,clip_sample=lowerCamelCase_ ,set_alpha_to_one=lowerCamelCase_ ,steps_offset=1 ,prediction_type='''epsilon''' ,thresholding=lowerCamelCase_ ,)
UpperCAmelCase__ : Dict = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCAmelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_=0 ) -> Tuple:
'''simple docstring'''
UpperCAmelCase__ : Dict = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
UpperCAmelCase__ : str = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(lowerCamelCase_ )
# create init_image
UpperCAmelCase__ : str = floats_tensor((1, 3, 64, 64) ,rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
UpperCAmelCase__ : Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
UpperCAmelCase__ : Union[str, Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
UpperCAmelCase__ : List[str] = np.ones((64, 64) ,dtype=np.floataa )
UpperCAmelCase__ : Optional[Any] = 0
if str(lowerCamelCase_ ).startswith('''mps''' ):
UpperCAmelCase__ : Any = torch.manual_seed(lowerCamelCase_ )
else:
UpperCAmelCase__ : Dict = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
UpperCAmelCase__ : Optional[Any] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ : Tuple = '''cpu'''
UpperCAmelCase__ : List[str] = self.get_dummy_components()
UpperCAmelCase__ : Tuple = self.pipeline_class(**lowerCamelCase_ )
UpperCAmelCase__ : int = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase__ : Tuple = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
UpperCAmelCase__ : List[Any] = output.images
UpperCAmelCase__ : Union[str, Any] = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ) ,return_dict=lowerCamelCase_ ,)[0]
UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase__ : int = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : List[Any] = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
UpperCAmelCase__ : Dict = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
UpperCAmelCase__ : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCAmelCase__ : Union[str, Any] = np.ones((768, 768) ,dtype=np.floataa )
UpperCAmelCase__ : int = 0
UpperCAmelCase__ : List[Any] = '''a hat'''
UpperCAmelCase__ : str = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' ,torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
UpperCAmelCase__ : List[str] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' ,torch_dtype=torch.floataa )
UpperCAmelCase__ : Tuple = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCAmelCase__ : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ : int = pipe_prior(
lowerCamelCase_ ,generator=lowerCamelCase_ ,num_inference_steps=5 ,negative_prompt='''''' ,).to_tuple()
UpperCAmelCase__ : Optional[Any] = pipeline(
lowerCamelCase_ ,image=lowerCamelCase_ ,mask_image=lowerCamelCase_ ,image_embeds=lowerCamelCase_ ,negative_image_embeds=lowerCamelCase_ ,generator=lowerCamelCase_ ,num_inference_steps=100 ,height=768 ,width=768 ,output_type='''np''' ,)
UpperCAmelCase__ : str = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase_ ,lowerCamelCase_ )
| 614 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
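# Usage sketch (the checkpoint name and inputs are illustrative, not part of
# this file): the processor hides the tokenizer/image-processor split behind
# one call, so paired text/image batches come back in a single encoding.
#
#     processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
#     inputs = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="pt")
#     # `inputs` now holds input_ids / attention_mask from the tokenizer and
#     # pixel_values from the image processor.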
| 676 |
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # in-order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # build a small sample tree (the exact shape is illustrative)
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
| 676 | 1 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a_ : int = '\\n\n'
a_ : Tuple = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
a_ : List[Any] = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ ( self) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string'),
}) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = 16 , a = True , a=None) -> List[Any]:
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
SCREAMING_SNAKE_CASE = 'cuda'
else:
SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu'
SCREAMING_SNAKE_CASE = AutoModelForCausalLM.from_pretrained(a)
SCREAMING_SNAKE_CASE = model.to(a)
SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(a)
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
SCREAMING_SNAKE_CASE = list(tokenizer.special_tokens_map_extended.values())
# check that the model already has at least one special token defined
assert (
len(a) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
SCREAMING_SNAKE_CASE = model.config.max_length - 1
else:
SCREAMING_SNAKE_CASE = model.config.max_length
SCREAMING_SNAKE_CASE = tokenizer(
a , add_special_tokens=a , padding=a , truncation=a , max_length=a , return_tensors='pt' , return_attention_mask=a , ).to(a)
SCREAMING_SNAKE_CASE = encodings['input_ids']
SCREAMING_SNAKE_CASE = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1) , 1)), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1) , 2)), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = CrossEntropyLoss(reduction='none')
for start_index in logging.tqdm(range(0 , len(a) , a)):
SCREAMING_SNAKE_CASE = min(start_index + batch_size , len(a))
SCREAMING_SNAKE_CASE = encoded_texts[start_index:end_index]
SCREAMING_SNAKE_CASE = attn_masks[start_index:end_index]
if add_start_token:
SCREAMING_SNAKE_CASE = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(a)
SCREAMING_SNAKE_CASE = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1)
SCREAMING_SNAKE_CASE = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa).to(a), attn_mask] , dim=1)
SCREAMING_SNAKE_CASE = encoded_batch
with torch.no_grad():
SCREAMING_SNAKE_CASE = model(a , attention_mask=a).logits
SCREAMING_SNAKE_CASE = out_logits[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE = attn_mask[..., 1:].contiguous()
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(a)}
| 73 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Generate all k-combinations of the numbers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
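# Cross-check (not in the original file): the recursion above enumerates the
# same combinations, in the same order, as the standard library.
if __name__ == "__main__":
    from itertools import combinations

    assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]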
| 333 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_UpperCAmelCase = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
_UpperCAmelCase = os.path.join(self.tmpdirname , snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(snake_case , snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> Union[str, Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self , **snake_case ) -> int:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCAmelCase = [Image.fromarray(np.moveaxis(snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_UpperCAmelCase = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(snake_case , return_tensors='np' )
_UpperCAmelCase = processor(images=snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = processor(text=snake_case )
_UpperCAmelCase = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(snake_case ):
processor()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(snake_case )
_UpperCAmelCase = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 24 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a*x^2 + b*x + c = 0, complex if the discriminant is negative."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")
if __name__ == "__main__":
main()
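# Extra illustrative check (not in the original file): a negative discriminant
# yields complex roots, which cmath.sqrt handles directly.
if __name__ == "__main__":
    print(quadratic_roots(a=1, b=0, c=1))  # (1j, -1j), the roots of x^2 + 1 = 0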
| 24 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowerCamelCase : Tuple = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__a ) , torch_builtin(__a ) ) )
self.assertFalse(torch.allclose(gelu_python(__a ) , gelu_new(__a ) ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
lowerCamelCase : Optional[int] = get_activation("gelu" )
lowerCamelCase : Optional[Any] = get_activation("gelu_10" )
lowerCamelCase : str = torch_builtin(__a )
lowerCamelCase : List[Any] = geluaa(__a )
lowerCamelCase : Optional[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(__a ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _snake_case ( self ):
"""simple docstring"""
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__a ):
get_activation("bogus" )
with self.assertRaises(__a ):
get_activation(__a )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = get_activation("gelu" )
lowerCamelCase : Optional[int] = 1
lowerCamelCase : List[str] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__a ):
lowerCamelCase : List[str] = acta.a
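# Context note (not part of the test file): "gelu_10" is ordinary GELU with
# its output clipped at 10, i.e. gelu_10(x) = min(gelu(x), 10), which is what
# the masked comparison in the second test above verifies.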
| 340 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'detr'
lowercase__ = ['past_key_values']
lowercase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__a , __a):
_UpperCamelCase = backbone_config.get('''model_type''')
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__a)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> int:
'''simple docstring'''
return cls(backbone_config=__a , **__a)
def UpperCAmelCase ( self) -> Dict[str, any]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = version.parse('1.11' )
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def UpperCAmelCase ( self) -> float:
'''simple docstring'''
return 1e-5
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 12
| 19 | 0 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class _lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCAmelCase__ : Dict = ["input_features", "is_longer"]
def __init__( self : Dict , snake_case : int=64 , snake_case : Dict=48000 , snake_case : Tuple=480 , snake_case : Optional[Any]=10 , snake_case : Union[str, Any]=1024 , snake_case : Union[str, Any]=0.0 , snake_case : List[str]=False , snake_case : float = 0 , snake_case : float = 14000 , snake_case : int = None , snake_case : str = "fusion" , snake_case : str = "repeatpad" , **snake_case : int , ):
super().__init__(
feature_size=snake_case , sampling_rate=snake_case , padding_value=snake_case , return_attention_mask=snake_case , **snake_case , )
__UpperCamelCase = top_db
__UpperCamelCase = truncation
__UpperCamelCase = padding
__UpperCamelCase = fft_window_size
__UpperCamelCase = (fft_window_size >> 1) + 1
__UpperCamelCase = hop_length
__UpperCamelCase = max_length_s
__UpperCamelCase = max_length_s * sampling_rate
__UpperCamelCase = sampling_rate
__UpperCamelCase = frequency_min
__UpperCamelCase = frequency_max
__UpperCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm=snake_case , mel_scale='''htk''' , )
__UpperCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=snake_case , min_frequency=snake_case , max_frequency=snake_case , sampling_rate=snake_case , norm='''slaney''' , mel_scale='''slaney''' , )
def snake_case ( self : Union[str, Any] ):
__UpperCamelCase = copy.deepcopy(self.__dict__ )
__UpperCamelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def snake_case ( self : Tuple , snake_case : np.array , snake_case : Optional[np.array] = None ):
__UpperCamelCase = spectrogram(
snake_case , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=snake_case , log_mel='''dB''' , )
return log_mel_spectrogram.T
def snake_case ( self : Optional[int] , snake_case : Any , snake_case : List[Any] , snake_case : int ):
__UpperCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
__UpperCamelCase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
__UpperCamelCase = [0]
# randomly choose index for each part
__UpperCamelCase = np.random.choice(ranges[0] )
__UpperCamelCase = np.random.choice(ranges[1] )
__UpperCamelCase = np.random.choice(ranges[2] )
__UpperCamelCase = mel[idx_front : idx_front + chunk_frames, :]
__UpperCamelCase = mel[idx_middle : idx_middle + chunk_frames, :]
__UpperCamelCase = mel[idx_back : idx_back + chunk_frames, :]
__UpperCamelCase = torch.tensor(mel[None, None, :] )
__UpperCamelCase = torch.nn.functional.interpolate(
snake_case , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=snake_case )
__UpperCamelCase = mel_shrink[0][0].numpy()
__UpperCamelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
    def _get_input_mel( self , waveform , max_length , truncation , padding ):
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 , overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] , axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel , total_frames , chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(F"data_truncating {truncation} not implemented" )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform , n_repeat ) )
                waveform = np.pad(waveform , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform , self.mel_filters_slaney )[None, :]
        return input_mel, longer
    def __call__( self , raw_speech : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation : str = None , padding : Optional[str] = None , max_length : Optional[int] = None , sampling_rate : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ):
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    F" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        is_batched_numpy = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech , dtype=np.float64 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            raw_speech = np.asarray(raw_speech , dtype=np.float64 )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech )]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform , max_length if max_length else self.nb_max_samples , truncation , padding )
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel )
            is_longer.append(longer )
        if truncation == "fusion" and sum(is_longer ) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0 , len(input_mel ) )
            is_longer[rand_idx] = True
        if isinstance(input_mel[0] , list ):
            input_mel = [np.asarray(feature , dtype=np.float32 ) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {'''input_features''': input_mel, '''is_longer''': is_longer}
        input_features = BatchFeature(input_features )
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors )
        return input_features
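

# Added usage sketch (illustrative; not part of the original module). With an
# installed `transformers`, the equivalent public class is used like this,
# assuming the 48 kHz default sampling rate kept above:
#
#     import numpy as np
#     from transformers import ClapFeatureExtractor
#
#     extractor = ClapFeatureExtractor()
#     waveform = np.zeros(48_000, dtype=np.float32)  # one second of silence
#     features = extractor(waveform, sampling_rate=48_000, return_tensors="np")
#     # "fusion" truncation stacks four mel views: (batch, 4, frames, 64)
#     print(features["input_features"].shape, features["is_longer"])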
| 375 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig ):
    """simple docstring"""

    model_type = "unispeech"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
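

# Added note (illustrative; not part of the original module): with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples raw audio
# by 5 * 2**6 = 320 samples per frame, so 16 kHz input yields 50 frames per second:
#
#     UniSpeechConfig().inputs_to_logits_ratio  # -> 320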
| 375 | 1 |
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components( self ):
        """simple docstring"""
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''original_image''': original_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def test_save_load_optional_components( self ):
        """simple docstring"""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16( self ):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1e-1 )

    def test_attention_slicing_forward_pass( self ):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def test_save_load_local( self ):
        """simple docstring"""
        self._test_save_load_local()

    def test_inference_batch_single_identical( self ):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 74 |
def a ( A__ ) -> int:
    '''simple docstring'''
    if A__ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(A__ , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
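    # Illustrative check (added, not in the original file): 25 == 0b11001,
    # which contains three set bits.
    assert a(25) == 3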
| 35 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
    print("""Googling.....""")
    url = F'''https://www.google.com/search?q={query}&num=100'''
    res = requests.get(
        url,
        headers={"""User-Agent""": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """yuRUbf"""})
            .find("""a""")
            .get("""href""")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """kCrYT"""})
            .find("""a""")
            .get("""href""")
        )["""url"""][0]
webbrowser.open(link)
| 152 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput ):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin , ConfigMixin ):
    """simple docstring"""

    order = 2
@register_to_config
    def __init__( self , sigma_min = 0.0_2 , sigma_max = 100 , s_noise = 1.0_0_7 , s_churn = 80 , s_min = 0.0_5 , s_max = 50 , )->None:
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input( self , sample , timestep = None )->torch.FloatTensor:
        '''simple docstring'''
        return sample

    def set_timesteps( self , num_inference_steps , device = None )->None:
        '''simple docstring'''
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )

    def add_noise_to_input( self , sample , sigma , generator = None )->Tuple[torch.FloatTensor, float]:
        '''simple docstring'''
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step( self , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , )->Union[KarrasVeOutput, Tuple]:
        '''simple docstring'''
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def step_correct( self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , )->Union[KarrasVeOutput, Tuple]:
        '''simple docstring'''
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def add_noise( self , original_samples , noise , timesteps )->Tuple:
        '''simple docstring'''
        raise NotImplementedError()
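

# Added usage sketch (illustrative; not part of the original module). With an
# installed `diffusers`, the stochastic Karras et al. (2022) loop looks like:
#
#     import torch
#     from diffusers import KarrasVeScheduler
#
#     scheduler = KarrasVeScheduler()
#     scheduler.set_timesteps(10)
#     sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#     for i in range(len(scheduler.schedule)):
#         sigma = scheduler.schedule[i].item()
#         sigma_prev = scheduler.schedule[i + 1].item() if i + 1 < len(scheduler.schedule) else 0.0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#         model_output = torch.zeros_like(sample_hat)  # stand-in for a denoiser call
#         sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample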
| 152 | 1 |
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def test_custom_files_are_present(transformers_path ):
'''simple docstring'''
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('''transformers''')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
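

# Example invocation (illustrative; the script path is an assumption):
#   python utils/check_build.py              # checks build/lib/transformers
#   python utils/check_build.py --check_lib  # checks the installed package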
| 673 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """deit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def rename_key(dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase = DeiTConfig()
# all deit models have fine-tuned heads
UpperCAmelCase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase = 1000
UpperCAmelCase = """huggingface/label-files"""
UpperCAmelCase = """imagenet-1k-id2label.json"""
UpperCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = int(deit_name[-6:-4] )
UpperCAmelCase = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
UpperCAmelCase = 192
UpperCAmelCase = 768
UpperCAmelCase = 12
UpperCAmelCase = 3
elif deit_name[9:].startswith("""small""" ):
UpperCAmelCase = 384
UpperCAmelCase = 1536
UpperCAmelCase = 12
UpperCAmelCase = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
UpperCAmelCase = 1024
UpperCAmelCase = 4096
UpperCAmelCase = 24
UpperCAmelCase = 16
# load original model from timm
UpperCAmelCase = timm.create_model(lowerCAmelCase , pretrained=lowerCAmelCase )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
UpperCAmelCase = timm_model.state_dict()
UpperCAmelCase = create_rename_keys(lowerCAmelCase , lowerCAmelCase )
for src, dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
read_in_q_k_v(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# load HuggingFace model
UpperCAmelCase = DeiTForImageClassificationWithTeacher(lowerCAmelCase ).eval()
model.load_state_dict(lowerCAmelCase )
# Check outputs on an image, prepared by DeiTImageProcessor
UpperCAmelCase = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
UpperCAmelCase = DeiTImageProcessor(size=lowerCAmelCase , crop_size=config.image_size )
UpperCAmelCase = image_processor(images=prepare_img() , return_tensors="""pt""" )
UpperCAmelCase = encoding["""pixel_values"""]
UpperCAmelCase = model(lowerCAmelCase )
UpperCAmelCase = timm_model(lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase , outputs.logits , atol=1e-3 )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--deit_name''',
default='''vit_deit_base_distilled_patch16_224''',
type=str,
help='''Name of the DeiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
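
# Example invocation (illustrative; the script file name and output path are placeholders):
#   python convert_deit_timm_to_pytorch.py --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path /tmp/deit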
| 673 | 1 |
ERROR_MESSAGE = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def lowerCamelCase_ ( spanish_id : str ) -> bool:
    '''simple docstring'''
    if not isinstance(spanish_id , str ):
        msg = F'''Expected string as input, found {type(spanish_id ).__name__}'''
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('-' , '' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(ERROR_MESSAGE )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MESSAGE ) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MESSAGE )
    return letter == LOOKUP_LETTERS[number % 2_3]
if __name__ == "__main__":
import doctest
doctest.testmod()
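    # Illustrative check (added): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
    # so "12345678Z" carries a valid checksum letter.
    assert lowerCamelCase_("12345678Z" )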
| 707 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""],
    """tokenization_canine""": ["""CanineTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_canine"""] = [
"""CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CanineForMultipleChoice""",
"""CanineForQuestionAnswering""",
"""CanineForSequenceClassification""",
"""CanineForTokenClassification""",
"""CanineLayer""",
"""CanineModel""",
"""CaninePreTrainedModel""",
"""load_tf_weights_in_canine""",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 449 |
"""simple docstring"""
def nand_gate(input_a: int , input_b: int ):
    """simple docstring"""
    return int((input_a, input_b).count(0 ) != 0 )


def test_nand_gate():
    """simple docstring"""
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 449 | 1 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
extra_arch = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}

if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
    raise Exception('''requires fairseq >= 0.9.0''')


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = ''' Hello world! cécé herlolip'''

mnli_rename_keys = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )


def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path ):
    sd = torch.load(checkpoint_path , map_location='cpu' )
    hub_interface = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
    hub_interface.model.load_state_dict(sd['model'] )
    return hub_interface


def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path , pytorch_dump_folder_path , hf_checkpoint_name=None ):
    if not os.path.exists(checkpoint_path ):
        bart = torch.hub.load('pytorch/fairseq' , checkpoint_path ).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace('.' , '-' )
    config = BartConfig.from_pretrained(hf_checkpoint_name )
    tokens = bart.encode(SAMPLE_TEXT ).unsqueeze(0 )
    tokensa = BartTokenizer.from_pretrained(hf_checkpoint_name ).encode(SAMPLE_TEXT , return_tensors='pt' ).unsqueeze(0 )
    if not torch.eq(tokens , tokensa ).all():
        raise ValueError(
            F'converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}' )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['model.shared.weight'] = state_dict['model.decoder.embed_tokens.weight']
        for src, dest in mnli_rename_keys:
            rename_key(state_dict , src , dest )
        model = BartForSequenceClassification(config ).eval()
        model.load_state_dict(state_dict )
        fairseq_output = bart.predict('mnli' , tokens , return_logits=True )
        new_model_outputs = model(tokens )[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict )
        state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
        fairseq_output = bart.extract_features(tokens )
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config ).eval()
            model.load_state_dict(state_dict )
            new_model_outputs = model(tokens ).model[0]
        else:
            model = BartForConditionalGeneration(config ).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict )
            if hasattr(model , 'lm_head' ):
                model.lm_head = make_linear_from_emb(model.model.shared )
            new_model_outputs = model.model(tokens )[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            F'`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}' )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
    args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
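
# Example invocation (illustrative; the script file name and output path are placeholders):
#   python convert_bart_original_pytorch_checkpoint_to_pytorch.py bart.large.cnn \
#       /tmp/bart-large-cnn --hf_config facebook/bart-large-cnn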
| 170 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
        scheduler = IPNDMScheduler()
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 4,
        }
        return inputs
def __lowerCAmelCase ( self : Dict ):
"""simple docstring"""
UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = DanceDiffusionPipeline(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
UpperCamelCase = pipe(**SCREAMING_SNAKE_CASE__ )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
UpperCamelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def __lowerCAmelCase ( self : List[Any] ):
"""simple docstring"""
return super().test_save_load_local()
@skip_mps
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def __lowerCAmelCase ( self : Any ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
def __lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : str ):
"""simple docstring"""
UpperCamelCase = torch_device
UpperCamelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = torch_device
UpperCamelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
UpperCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
UpperCamelCase = output.audios
UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
UpperCamelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 170 | 1 |
'''simple docstring'''
__UpperCAmelCase ="Tobias Carryer"
from time import time
class LinearCongruentialGenerator:
    def __init__( self , multiplier , increment , modulo , seed=int(time() ) ):  # noqa: B008
        """simple docstring"""
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number( self ):
        """simple docstring"""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_6_6_4_5_2_5, 1_0_1_3_9_0_4_2_2_3, 2 << 3_1)
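    # Added note (illustrative): these are the classic Numerical Recipes LCG
    # constants; with modulus 2 << 31 == 2**32 the period is at most 2**32.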
while True:
print(lcg.next_number())
| 546 |
__lowerCamelCase = """Tobias Carryer"""
from time import time
class LinearCongruentialGenerator:
    def __init__(self , multiplier , increment , modulo , seed=int(time() ) ) -> None:  # noqa: B008
        '''simple docstring'''
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self ) -> int:
        '''simple docstring'''
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed
if __name__ == "__main__":
# Show the LCG in action.
    lcg = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31)
while True:
print(lcg.next_number())
| 204 | 0 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc ):
    '''simple docstring'''
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others." )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
    # Sort
    return sorted(new_doc , key=lambda s : s["title"].lower() )
def check_model_doc(overwrite=False ):
    '''simple docstring'''
    with open(PATH_TO_TOC , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
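
# Example invocation (illustrative; the script path is an assumption):
#   python utils/check_doc_toc.py --fix_and_overwrite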
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig ):
    model_type = 'upernet'

    def __init__( self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict( self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
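

# Added usage sketch (illustrative; not part of the original module): building
# the config with no arguments falls back to the default ResNet backbone, and
# `to_dict` re-serializes that backbone config alongside `model_type`:
#
#     config = UperNetConfig()
#     assert config.to_dict()["model_type"] == "upernet"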
| 658 | 1 |
'''simple docstring'''
from __future__ import annotations
def two_pointer(nums: list[int] , target: int ):
    '''simple docstring'''
    # requires `nums` to be sorted in ascending order
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
| 566 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object ):
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ):
        """simple docstring"""
        config = self.get_config()
        config.vocab_size = 3_0_0
        return config
    def check_loss_output( self , result ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )

    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )

    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase__( snake_case_ , snake_case_ , unittest.TestCase ):
UpperCamelCase : Tuple = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase : str = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase : List[Any] = True
UpperCamelCase : Optional[Any] = False
UpperCamelCase : List[Any] = False
UpperCamelCase : Tuple = False
UpperCamelCase : List[str] = False
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = DebertaModelTester(self )
__lowercase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=3_7 )
def __magic_name__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase )
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase )
@slow
def __magic_name__ ( self ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = DebertaModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __magic_name__ ( self ):
"""simple docstring"""
pass
@slow
def __magic_name__ ( self ):
"""simple docstring"""
__lowercase = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
__lowercase = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
__lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__lowercase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
# compare the actual values for a slice.
__lowercase = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 566 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = StableDiffusionSAGPipeline
SCREAMING_SNAKE_CASE_ : int = TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ : int = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ : Tuple = False
def __A ( self ) -> Optional[Any]:
torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
SCREAMING_SNAKE_CASE = CLIPTextModel(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Any:
if str(lowerCAmelCase__ ).startswith('mps' ):
SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__ )
else:
SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __A ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
SCREAMING_SNAKE_CASE = sag_pipe.to(lowerCAmelCase__ )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = '.'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = sag_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
SCREAMING_SNAKE_CASE = sag_pipe.to(lowerCAmelCase__ )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = '.'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = sag_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
SCREAMING_SNAKE_CASE = output.images
SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def __A ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
SCREAMING_SNAKE_CASE = sag_pipe.to(lowerCAmelCase__ )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = '.'
SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowerCAmelCase__ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
SCREAMING_SNAKE_CASE = output.images
assert image.shape == (1, 512, 768, 3)
| 327 |
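Editor's note: a hedged sketch of how the StableDiffusionSAGPipeline tested above is used in practice. It assumes a CUDA device and access to the public "CompVis/stable-diffusion-v1-4" weights; the sag_scale value is illustrative (sag_scale > 0 enables self-attention guidance).

import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

generator = torch.manual_seed(0)
image = pipe(
    "a photograph of an astronaut riding a horse",
    generator=generator,
    guidance_scale=7.5,
    sag_scale=0.75,
    num_inference_steps=20,
).images[0]
image.save("sag_sample.png")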
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __A ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE = BlipImageProcessor()
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel' )
SCREAMING_SNAKE_CASE = BlipProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
processor.save_pretrained(self.tmpdirname )
def __A ( self , **lowerCAmelCase__ ) -> Tuple:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).tokenizer
def __A ( self , **lowerCAmelCase__ ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ).image_processor
def __A ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> List[Any]:
        SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __A ( self ) -> List[str]:
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
SCREAMING_SNAKE_CASE = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = image_processor(lowerCAmelCase__ , return_tensors='np' )
SCREAMING_SNAKE_CASE = processor(images=lowerCAmelCase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = processor(text=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ) -> int:
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE = self.get_image_processor()
SCREAMING_SNAKE_CASE = self.get_tokenizer()
SCREAMING_SNAKE_CASE = BlipProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = 'lower newer'
SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
SCREAMING_SNAKE_CASE = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
| 327 | 1 |
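Editor's note: a short sketch of the single-entry-point behaviour the processor tests above assert, assuming the public "Salesforce/blip-image-captioning-base" checkpoint. One call tokenizes the text and preprocesses the image.

import numpy as np
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))

inputs = processor(text="a blank test image", images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']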
import types
from typing import Optional, Tuple, Union

import torch
from einops import rearrange, reduce

from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput


BITS = 8


def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit-plane tensor in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit-plane tensor in [-1, 1]; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)


def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)


def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)


class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # The patched step functions run with the scheduler as `self`, so expose
        # bit_scale on the scheduler and bind the bit-aware step to the instance.
        scheduler.bit_scale = bit_scale
        step_fn = ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        scheduler.step = types.MethodType(step_fn, scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            # numpy_to_pil expects a channel-last numpy array in [0, 1]
            image = self.numpy_to_pil(image.cpu().permute(0, 2, 3, 1).numpy())

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 639 |
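Editor's note: a self-contained round-trip check for the bit encoding used above. It mirrors decimal_to_bits/bits_to_decimal with plain tensor ops (no einops) to show the invariant: quantize to 8 bits, expand to {-1, +1} bit planes, and decode back exactly.

import torch

x = torch.rand(2, 3, 4, 4)                          # image tensor in [0, 1]
q = (x * 255).int().clamp(0, 255)                   # 8-bit quantized values

mask = 2 ** torch.arange(7, -1, -1, dtype=torch.int32)             # MSB-first bit weights
bits = ((q.unsqueeze(2) & mask.view(1, 1, 8, 1, 1)) != 0).float()  # (b, c, 8, h, w)
bits = bits * 2 - 1                                 # {0, 1} -> {-1, +1}, as the pipeline does

decoded = ((bits > 0).int() * mask.view(1, 1, 8, 1, 1)).sum(dim=2)  # invert the encoding
assert torch.equal(decoded, q.long()), "bit round-trip must be lossless"
print("round-trip ok:", decoded.shape)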
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """Build an iterative segment tree over ``arr`` combined with ``fnc``."""
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set element ``p`` to ``v`` and recompute the affected internal nodes."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Fold ``fn`` over the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)

    test_all_segments()
| 314 | 0 |
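Editor's note: a tiny usage sketch of the iterative segment tree above (it assumes the SegmentTree class defined above is in scope). Query bounds are inclusive indices; update and query are O(log n).

arr = [5, 1, 4, 2]
st = SegmentTree(arr, min)
assert st.query(0, 3) == 1   # min over the whole array
st.update(1, 9)              # the tree now reflects [5, 9, 4, 2]
assert st.query(0, 3) == 2
print("segment tree ok")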
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 388 |
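Editor's note: the __init__.py above uses transformers' internal _LazyModule to defer heavy imports until first use. A generic, hedged sketch of the same idea using only the standard library (PEP 562 module __getattr__; the module and symbol names are placeholders for a hypothetical package of your own):

import importlib
from typing import TYPE_CHECKING

_import_structure = {"core": ["Widget"]}

if TYPE_CHECKING:
    from .core import Widget  # real import for type checkers only
else:
    def __getattr__(name):  # called on first attribute access (PEP 562)
        for module, symbols in _import_structure.items():
            if name in symbols:
                return getattr(importlib.import_module(f".{module}", __name__), name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")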
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
    from transformers import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForMaskedLM,
        TFAutoModelForPreTraining,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelWithLMHead,
        TFBertForMaskedLM,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertModel,
        TFFunnelBaseModel,
        TFFunnelModel,
        TFGPT2LMHeadModel,
        TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
        TFTapasForQuestionAnswering,
    )
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = '''bert-base-cased'''
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : Union[str, Any] ):
"""simple docstring"""
__snake_case = '''bert-base-cased'''
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForPreTraining.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : int ):
"""simple docstring"""
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForCausalLM.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForCausalLM.from_pretrained(a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : int ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : str ):
"""simple docstring"""
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForMaskedLM.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForMaskedLM.from_pretrained(a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : Any ):
"""simple docstring"""
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
            __snake_case = TFAutoModelForSeq2SeqLM.from_pretrained(a__ )
            __snake_case , __snake_case = TFAutoModelForSeq2SeqLM.from_pretrained(a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : Union[str, Any] ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForSequenceClassification.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
def a (self : str ):
"""simple docstring"""
for model_name in ["bert-base-uncased"]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForQuestionAnswering.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
@slow
@require_tensorflow_probability
def a (self : List[str] ):
"""simple docstring"""
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__snake_case = AutoConfig.from_pretrained(a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
__snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(a__ )
__snake_case , __snake_case = TFAutoModelForTableQuestionAnswering.from_pretrained(
a__ , output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertIsInstance(a__ , a__ )
def a (self : Optional[int] ):
"""simple docstring"""
__snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )
def a (self : List[Any] ):
"""simple docstring"""
__snake_case = TFAutoModelWithLMHead.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=a__ ) , 1_4410 )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(a__ , a__ )
__snake_case = copy.deepcopy(model.config )
__snake_case = ['''FunnelBaseModel''']
__snake_case = TFAutoModel.from_config(a__ )
self.assertIsInstance(a__ , a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a__ )
__snake_case = TFAutoModel.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def a (self : str ):
"""simple docstring"""
try:
AutoConfig.register('''new-model''' , a__ )
__snake_case = [
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(a__ ):
auto_class.register(a__ , a__ )
auto_class.register(a__ , a__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a__ ):
auto_class.register(a__ , a__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__snake_case = BertModelTester(self ).get_config()
__snake_case = NewModelConfig(**tiny_config.to_dict() )
__snake_case = auto_class.from_config(a__ )
self.assertIsInstance(a__ , a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(a__ )
__snake_case = auto_class.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def a (self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
a__ , '''bert-base is not a local folder and is not a valid model identifier''' ):
__snake_case = TFAutoModel.from_pretrained('''bert-base''' )
def a (self : Dict ):
"""simple docstring"""
with self.assertRaisesRegex(
a__ , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__snake_case = TFAutoModel.from_pretrained(a__ , revision='''aaaaaa''' )
def a (self : Tuple ):
"""simple docstring"""
with self.assertRaisesRegex(
a__ , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def a (self : int ):
"""simple docstring"""
with self.assertRaisesRegex(a__ , '''Use `from_pt=True` to load this model''' ):
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def a (self : List[str] ):
"""simple docstring"""
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__snake_case = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__snake_case = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
__snake_case = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 388 | 1 |
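Editor's note: the registration test above relies on transformers' public AutoConfig/AutoModel extension hooks. A hedged sketch of the same flow for a custom architecture (PyTorch flavour shown; every "My..." name is a placeholder, not a real transformers class):

import torch.nn as nn
from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyModel(PreTrainedModel):
    config_class = MyConfig

    def __init__(self, config):
        super().__init__(config)
        self.linear = nn.Linear(4, 4)

    def forward(self, x):
        return self.linear(x)

AutoConfig.register("my-model", MyConfig)
AutoModel.register(MyConfig, MyModel)

model = AutoModel.from_config(MyConfig())
print(type(model).__name__)  # MyModel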
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
a__ = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , _a , _a , _a = None , _a = None ) -> List[Any]:
_a : Union[str, Any] = None
_a : Optional[Any] = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
_a : Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(_a ):
if item not in EXCLUDE_EXAMPLES:
_a : Any = os.path.join(_a , _a )
if os.path.isfile(_a ) and ".py" in item_path:
with self.subTest(
tested_script=_a , feature_script=_a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
_a : Optional[int] = compare_against_test(
os.path.join(_a , _a ) , _a , _a , _a )
_a : Union[str, Any] = '''\n'''.join(_a )
if special_strings is not None:
for string in special_strings:
_a : Union[str, Any] = diff.replace(_a , '''''' )
self.assertEqual(_a , '''''' )
def __lowercase ( self ) -> Optional[Any]:
self.one_complete_example('''complete_nlp_example.py''' , _a )
self.one_complete_example('''complete_nlp_example.py''' , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[int] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
_a : int = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , _a , _a , _a )
self.one_complete_example('''complete_cv_example.py''' , _a , _a , _a )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = False
@classmethod
def __lowercase ( cls ) -> List[Any]:
super().setUpClass()
_a : str = tempfile.mkdtemp()
_a : str = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_a : int = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __lowercase ( cls ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
_a : List[str] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def __lowercase ( self ) -> Any:
_a : Dict = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
_a : str = run_command(self._launch_args + testargs , return_stdout=_a )
self.assertNotIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
_a : Optional[int] = run_command(self._launch_args + testargs , return_stdout=_a )
if torch.cuda.is_available():
_a : List[Any] = torch.cuda.device_count()
else:
_a : Tuple = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
else:
self.assertIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
@slow
def __lowercase ( self ) -> Union[str, Any]:
_a : List[str] = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
_a : Tuple = run_command(self._launch_args + testargs , return_stdout=_a )
_a : int = re.findall('''({.+})''' , _a )
_a : int = [r for r in results if '''accuracy''' in r][-1]
_a : Optional[Any] = ast.literal_eval(_a )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def __lowercase ( self ) -> str:
_a : Optional[int] = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
_a : str = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_a , '''tracking''' ) ) )
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
| 14 |
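Editor's note: the suite above shells out to `accelerate launch` against the example scripts. A hedged sketch of the checkpoint/resume pattern those tests exercise, inside a training script of your own (the directory path is a placeholder):

from accelerate import Accelerator

accelerator = Accelerator()
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

# Save everything prepare() tracks (models, optimizers, RNG states) at a boundary.
accelerator.save_state("checkpoints/epoch_0")

# Later (or on a resumed run), restore training exactly where it left off.
accelerator.load_state("checkpoints/epoch_0")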
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
| 157 | 0 |
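Editor's note: a quick property check for the introsort above, comparing against Python's built-in sort on random inputs (it assumes the functions above are in scope).

import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 40))]
    assert sort(list(data)) == sorted(data)
print("introsort matches sorted() on random inputs")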
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
_snake_case : Dict = DiTPipeline
_snake_case : int = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_snake_case : str = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
_snake_case : int = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_snake_case : int = False
def lowerCAmelCase_ ( self : Dict ):
torch.manual_seed(0 )
        _UpperCAmelCase = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__lowerCAmelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=__lowerCAmelCase , )
_UpperCAmelCase = AutoencoderKL()
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict=0 ):
if str(__lowerCAmelCase ).startswith("""mps""" ):
_UpperCAmelCase = torch.manual_seed(__lowerCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
_UpperCAmelCase = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = """cpu"""
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
_UpperCAmelCase = self.get_dummy_inputs(__lowerCAmelCase )
_UpperCAmelCase = pipe(**__lowerCAmelCase ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1e-3 )
def lowerCAmelCase_ ( self : Dict ):
self._test_inference_batch_single_identical(relax_max_difference=__lowerCAmelCase , expected_max_diff=1e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def lowerCAmelCase_ ( self : List[Any] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
def lowerCAmelCase_ ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_UpperCAmelCase = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_UpperCAmelCase = pipe.get_label_ids(__lowerCAmelCase )
_UpperCAmelCase = pipe(__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = load_numpy(
f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-2
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_UpperCAmelCase = ["""vase""", """umbrella"""]
_UpperCAmelCase = pipe.get_label_ids(__lowerCAmelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
f'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1e-1
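Editor's note: a hedged sketch of class-conditional sampling with the DiT pipeline tested above, assuming a CUDA device and access to the public "facebook/DiT-XL-2-256" weights.

import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["white shark", "umbrella"])  # ImageNet label lookup
generator = torch.manual_seed(0)
images = pipe(class_labels=class_ids, generator=generator, num_inference_steps=25).images
images[0].save("dit_shark.png")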
| 275 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """apply_ocr""" ) )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : List[str] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __lowerCAmelCase )
self.assertIsInstance(encoding.boxes , __lowerCAmelCase )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase_ ( self : Any ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# with apply_OCR = True
        _UpperCAmelCase = LayoutLMv3ImageProcessor()
from datasets import load_dataset
_UpperCAmelCase = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
_UpperCAmelCase = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
_UpperCAmelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowerCAmelCase )
self.assertListEqual(encoding.boxes , __lowerCAmelCase )
# with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False )
        encoding = image_processing(__lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 275 | 1 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
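# Session-scoped pytest fixtures for the `datasets` test suite: they build one
# small toy dataset plus sample files in many on-disk formats (txt, csv, json,
# parquet, sqlite, and assorted compressed archives), created once per run.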
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( ) -> Dict:
    n = 1_0
    features = datasets.Features(
{
'''tokens''': datasets.Sequence(datasets.Value('''string''' ) ),
'''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ),
'''answers''': datasets.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
'''id''': datasets.Value('''int64''' ),
} )
    dataset = datasets.Dataset.from_dict(
{
'''tokens''': [['''foo'''] * 5] * n,
'''labels''': [[1] * 5] * n,
'''answers''': [{'''answer_start''': [9_7], '''text''': ['''1976''']}] * 1_0,
            '''id''': list(range(n ) ),
        } , features=features , )
return dataset
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Any:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' )
dataset.map(cache_file_name=snake_case__ )
return filename
# FILE_CONTENT + files
lowercase__ : Any = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
lowerCAmelCase = FILE_CONTENT
with open(snake_case__ , '''w''' ) as f:
f.write(snake_case__ )
return filename
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
    import bz2
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2'''
lowerCAmelCase = bytes(snake_case__ , '''utf-8''' )
    with bz2.open(snake_case__ , '''wb''' ) as f:
f.write(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
import gzip
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
lowerCAmelCase = bytes(snake_case__ , '''utf-8''' )
with gzip.open(snake_case__ , '''wb''' ) as f:
f.write(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[Any]:
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4'''
lowerCAmelCase = bytes(snake_case__ , '''utf-8''' )
    with lz4.frame.open(snake_case__ , '''wb''' ) as f:
f.write(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z'''
    with py7zr.SevenZipFile(snake_case__ , '''w''' ) as archive:
archive.write(snake_case__ , arcname=os.path.basename(snake_case__ ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> List[Any]:
import tarfile
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(snake_case__ , '''w''' ) as f:
f.add(snake_case__ , arcname=os.path.basename(snake_case__ ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
import lzma
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
lowerCAmelCase = bytes(snake_case__ , '''utf-8''' )
with lzma.open(snake_case__ , '''wb''' ) as f:
f.write(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Any:
import zipfile
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.basename(snake_case__ ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
lowerCAmelCase = bytes(snake_case__ , '''utf-8''' )
with zstd.open(snake_case__ , '''wb''' ) as f:
f.write(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> List[Any]:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
lowerCAmelCase = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(snake_case__ , '''w''' ) as f:
f.write(snake_case__ )
return filename
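# Shared toy tables: four rows of (string, int, float) columns plus a couple of
# variants, serialized to disk by the format-specific path fixtures below.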
lowercase__ : Union[str, Any] = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
lowercase__ : Tuple = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
lowercase__ : Any = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
lowercase__ : Optional[int] = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
lowercase__ : List[Any] = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( ) -> str:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
lowerCAmelCase = datasets.Dataset.from_dict(snake_case__ )
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' )
    with contextlib.closing(sqlite3.connect(snake_case__ ) ) as con:
lowerCAmelCase = con.cursor()
cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' )
for item in DATA:
cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Tuple:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(snake_case__ , '''w''' , newline='''''' ) as f:
lowerCAmelCase = csv.DictWriter(snake_case__ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(snake_case__ , '''w''' , newline='''''' ) as f:
lowerCAmelCase = csv.DictWriter(snake_case__ , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> List[Any]:
    import bz2
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2'''
with open(snake_case__ , '''rb''' ) as f:
lowerCAmelCase = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(snake_case__ , '''wb''' ) as f:
f.write(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.basename(snake_case__ ) )
f.write(snake_case__ , arcname=os.path.basename(snake_case__ ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(snake_case__ , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Any:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case__ ) ) )
f.write(snake_case__ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' )
lowerCAmelCase = pa.schema(
{
'''col_1''': pa.string(),
            '''col_2''': pa.int64(),
            '''col_3''': pa.float64(),
} )
with open(snake_case__ , '''wb''' ) as f:
lowerCAmelCase = pq.ParquetWriter(snake_case__ , schema=snake_case__ )
lowerCAmelCase = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(snake_case__ ) )] for k in DATA[0]} , schema=snake_case__ )
writer.write_table(snake_case__ )
writer.close()
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Tuple:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowerCAmelCase = {'''data''': DATA}
with open(snake_case__ , '''w''' ) as f:
json.dump(snake_case__ , snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[int]:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
lowerCAmelCase = {'''data''': DATA_DICT_OF_LISTS}
with open(snake_case__ , '''w''' ) as f:
json.dump(snake_case__ , snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(snake_case__ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(snake_case__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Any:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(snake_case__ , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(snake_case__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> str:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(snake_case__ , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(snake_case__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(snake_case__ , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(snake_case__ ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> int:
import gzip
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(snake_case__ , '''rb''' ) as orig_file:
with gzip.open(snake_case__ , '''wb''' ) as zipped_file:
zipped_file.writelines(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Optional[int]:
import gzip
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(snake_case__ , '''rb''' ) as orig_file:
with gzip.open(snake_case__ , '''wb''' ) as zipped_file:
zipped_file.writelines(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.basename(snake_case__ ) )
f.write(snake_case__ , arcname=os.path.basename(snake_case__ ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.join('''nested''' , os.path.basename(snake_case__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case__ ) ) )
f.write(snake_case__ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Union[str, Any]:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(snake_case__ , '''w''' ) as f:
f.add(snake_case__ , arcname=os.path.basename(snake_case__ ) )
f.add(snake_case__ , arcname=os.path.basename(snake_case__ ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[str]:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(snake_case__ , '''w''' ) as f:
f.add(snake_case__ , arcname=os.path.join('''nested''' , os.path.basename(snake_case__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Tuple:
lowerCAmelCase = ['''0''', '''1''', '''2''', '''3''']
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(snake_case__ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Dict:
lowerCAmelCase = ['''0''', '''1''', '''2''', '''3''']
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(snake_case__ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Dict:
lowerCAmelCase = ['''0''', '''1''', '''2''', '''3''']
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(snake_case__ , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Dict:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.basename(snake_case__ ) )
f.write(snake_case__ , arcname=os.path.basename(snake_case__ ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case__ ) ) )
f.write(snake_case__ , arcname=os.path.join('''main_dir''' , os.path.basename(snake_case__ ) ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(snake_case__ , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
lowerCAmelCase = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
lowerCAmelCase = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(snake_case__ )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( ) -> Tuple:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> List[str]:
lowerCAmelCase = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(snake_case__ , '''w''' ) as f:
f.write(snake_case__ , arcname=os.path.basename(snake_case__ ) )
f.write(snake_case__ , arcname=os.path.basename(snake_case__ ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
lowerCAmelCase = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 1_0 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 1_0 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 1_0 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 1_0 )
return data_dir
| 312 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
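# Fast test: run the variance-exploding SDE pipeline for two steps with a tiny
# randomly initialized UNet and compare a fixed slice of the output image.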
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 312 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = ["pixel_values"]
def __init__( self , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 0.9 , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 / 255 , UpperCAmelCase = True , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = size if size is not None else {"shortest_edge": 224}
lowercase_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowercase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowercase_ = get_size_dict(UpperCAmelCase , param_name="crop_size" )
lowercase_ = do_resize
lowercase_ = size
lowercase_ = crop_pct
lowercase_ = resample
lowercase_ = do_center_crop
lowercase_ = crop_size
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_normalize
lowercase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = PILImageResampling.BICUBIC , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
'''simple docstring'''
lowercase_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
lowercase_ = int(size["shortest_edge"] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowercase_ = int(size["height"] / crop_pct )
else:
lowercase_ = (int(size["height"] / crop_pct ), int(size["width"] / crop_pct ))
else:
raise ValueError("Invalid size for resize: {}".format(UpperCAmelCase ) )
lowercase_ = get_resize_output_image_size(UpperCAmelCase , size=UpperCAmelCase , default_to_square=UpperCAmelCase )
else:
if "shortest_edge" in size:
lowercase_ = get_resize_output_image_size(UpperCAmelCase , size=size["shortest_edge"] , default_to_square=UpperCAmelCase )
elif "height" in size and "width" in size:
lowercase_ = (size["height"], size["width"])
else:
raise ValueError("Invalid size for resize: {}".format(UpperCAmelCase ) )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
'''simple docstring'''
lowercase_ = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(UpperCAmelCase , size=(size["height"], size["width"]) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> int:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = ChannelDimension.FIRST , **UpperCAmelCase , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = crop_pct if crop_pct is not None else self.crop_pct
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ = image_mean if image_mean is not None else self.image_mean
lowercase_ = image_std if image_std is not None else self.image_std
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowercase_ = crop_size if crop_size is not None else self.crop_size
lowercase_ = get_size_dict(UpperCAmelCase , param_name="crop_size" )
lowercase_ = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_pct is None:
raise ValueError("Crop_pct must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
lowercase_ = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , crop_pct=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
lowercase_ = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase_ = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase_ = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase_ = {"pixel_values": images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 717 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
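# Lazy import structure: the heavy torch-backed submodules are only imported on
# first attribute access (or eagerly under TYPE_CHECKING for static analysis).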
SCREAMING_SNAKE_CASE__ = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 601 | 0 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'''E''': 12.70,
'''T''': 9.06,
'''A''': 8.17,
'''O''': 7.51,
'''I''': 6.97,
'''N''': 6.75,
'''S''': 6.33,
'''H''': 6.09,
'''R''': 5.99,
'''D''': 4.25,
'''L''': 4.03,
'''C''': 2.78,
'''U''': 2.76,
'''M''': 2.41,
'''W''': 2.36,
'''F''': 2.23,
'''G''': 2.02,
'''Y''': 1.97,
'''P''': 1.93,
'''B''': 1.29,
'''V''': 0.98,
'''K''': 0.77,
'''J''': 0.15,
'''X''': 0.15,
'''Q''': 0.10,
'''Z''': 0.07,
}
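# Values are percentage frequencies of each letter in typical English text
# (they sum to roughly 100).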
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1

    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}

    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]

    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
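    # Example: typical English prose scores at or near the maximum of 12, while
    # text with a flat letter distribution (e.g. many ciphertexts) scores lower:
    # print(english_freq_match_score("To be or not to be, that is the question."))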
| 357 |
'''simple docstring'''
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')
if __name__ == "__main__":
main()
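# Note: this client assumes a matching server is already listening on the same
# host and port, accepts the connection, streams a file back in chunks, and
# closes the socket so that recv() returns b"" and the loop terminates.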
| 128 | 0 |
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
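# The tester below builds a tiny Levit configuration and drives it through the
# shared ModelTesterMixin / PipelineTesterMixin harness; the strided patch
# embedding shrinks the spatial size, hence the floor(...) bookkeeping below.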
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class LevitModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_outputs_equivalence(self):
        pass
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
@slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 700 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
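# Converts DiT (Document Image Transformer) checkpoints from their original
# release into the HuggingFace BEiT-based format, then verifies the logits on
# a COCO sample image before saving (and optionally pushing) the result.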
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"{prefix}blocks.{i}.norm1.weight", F"beit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm1.bias", F"beit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.weight", F"beit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"{prefix}blocks.{i}.attn.proj.bias", F"beit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.weight", F"beit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"{prefix}blocks.{i}.norm2.bias", F"beit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.weight", F"beit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc1.bias", F"beit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.weight", F"beit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"{prefix}blocks.{i}.mlp.fc2.bias", F"beit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
(F"{prefix}cls_token", '''beit.embeddings.cls_token'''),
(F"{prefix}patch_embed.proj.weight", '''beit.embeddings.patch_embeddings.projection.weight'''),
(F"{prefix}patch_embed.proj.bias", '''beit.embeddings.patch_embeddings.projection.bias'''),
(F"{prefix}pos_embed", '''beit.embeddings.position_embeddings'''),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('''mask_token''', '''beit.embeddings.mask_token'''),
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1_0_2_4
        config.intermediate_size = 4_0_9_6
        config.num_hidden_layers = 2_4
        config.num_attention_heads = 1_6

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 1_6
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 1_6] if "rvlcdip" in checkpoint_url else [1, 1_9_6, 8_1_9_2]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 358 | 0 |
"""simple docstring"""
class EditDistance:
    """
    Use:
    solver = EditDistance()
    edit_distance_result = solver.min_dist_top_down(first_word, second_word)
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
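# Both solvers run in O(m*n) time and fill an O(m*n) table; the memoized
# top-down variant additionally incurs recursion depth of up to m + n.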
if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 46 |
"""simple docstring"""
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 103 | 0 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
@property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
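

# Hedged usage sketch (assumed, not from the original module): the processor fans
# text out to the tokenizer and images out to the image processor, e.g.
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # inputs holds input_ids / attention_mask from the tokenizer and
#   # pixel_values from the image processor.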
| 708 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def num_embed(self):
return 12
@property
    def num_embeds_ada_norm(self):
return 12
@property
    def text_embedder_hidden_size(self):
return 32
@property
    def dummy_vqvae(self):
torch.manual_seed(0)
        model = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
return tokenizer
@property
    def dummy_text_encoder(self):
torch.manual_seed(0)
        config = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(config)
@property
    def dummy_transformer(self):
torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
'attention_bias': True,
'cross_attention_dim': 32,
'attention_head_dim': height * width,
'num_attention_heads': 1,
'num_vector_embeds': self.num_embed,
'num_embeds_ada_norm': self.num_embeds_ada_norm,
'norm_num_groups': 32,
'sample_size': width,
'activation_fn': 'geglu-approximate',
}
        model = Transformer2DModel(**model_kwargs)
return model
    def test_vq_diffusion(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length)
        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "teddy bear playing in the pool"
        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy")
        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool", num_images_per_prompt=1, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
| 6 | 0 |
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encode `data` to Base64 by hand, using the standard alphabet above."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decode a Base64 string (or ASCII bytes) back to raw bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
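

# Round-trip example (assumed, not from the original module): "SGVsbG8h" is the
# standard Base64 encoding of b"Hello!".
assert base64_encode(b"Hello!") == b"SGVsbG8h"
assert base64_decode("SGVsbG8h") == b"Hello!"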
if __name__ == "__main__":
import doctest
doctest.testmod()
| 394 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__A : Optional[Any] = logging.getLogger(__name__)
def parse_args():
a__ = argparse.ArgumentParser(
description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
parser.add_argument(
'--dataset_name' , type=a , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , )
parser.add_argument(
'--dataset_config' , type=a , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' )
parser.add_argument(
'--tokenizer_name_or_path' , type=a , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , )
parser.add_argument(
'--shard_size' , type=a , default=1000 , help='Number of entries to go in a single shard.' , )
parser.add_argument('--split' , type=a , default='train' , choices=['train', 'test', 'validation'] )
parser.add_argument(
'--limit' , default=a , type=a , help='Limit the number of shards (used for debugging).' , )
parser.add_argument(
'--max_length' , type=a , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
' sequence length that is a multiple of 8.' , )
parser.add_argument(
'--output_dir' , default='tf-tpu' , type=a , help='Output directory where the TFRecord shards will be saved. If the'
' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
' shards will be directly saved to a Google Cloud Storage bucket.' , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        serialized = example.SerializeToString()
        records.append(serialized)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)
    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)
        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
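

# Hedged sketch (assumed, not part of the original script): one way to read a
# written shard back for verification; feature names match those serialized above.
def peek_at_shard(tfrecord_path, max_length=512):
    feature_description = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    for record in tf.data.TFRecordDataset(tfrecord_path).take(1):
        return tf.io.parse_single_example(record, feature_description)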
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 394 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE, )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
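

# Note on the label files written above (assumed reading of the code): each line
# follows the normalized YOLO convention "class_id x_center y_center width height",
# with all four box values expressed as fractions of the mosaic image size,
# e.g. "0 0.25 0.40 0.10 0.20".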
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
        img_paths.append(img_path)
        labels.append(boxes)
return img_paths, labels
def update_image_and_anno(
    all_img_list: list, all_annos: list, idxs: list[int], output_size: tuple[int, int], scale_range: tuple[float, float], filter_scale: float = 0.0, ) -> tuple:
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        img_path = all_img_list[index]
        path_list.append(img_path)
        img_annos = all_annos[index]
        img = cv2.imread(img_path)
if i == 0: # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
elif i == 1: # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
elif i == 2: # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
else: # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
# Remove bounding box small than scale of filter
if filter_scale > 0:
        new_anno = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def random_chars(number_char: int) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
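

# Usage example (assumed, not from the original file): with voltage unknown,
# P = V * I gives V = P / I, so electric_power(voltage=0, current=2, power=5)
# returns result(name='voltage', value=2.5).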
if __name__ == "__main__":
import doctest
doctest.testmod()
| 694 | 0 |
from math import factorial
def solution(num: int = 100) -> int:
    return sum(int(digit) for digit in str(factorial(num)))
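

# Worked example (assumed): 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27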
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip()))) | 162 |
'''simple docstring'''
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = document.translate(
str.maketrans("" , "" , string.punctuation ) ).replace("\n" , "" )
__SCREAMING_SNAKE_CASE : Optional[Any] = document_without_punctuation.split(" " ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = corpus.lower().translate(
str.maketrans("" , "" , string.punctuation ) ) # strip all punctuation and replace it with ''
__SCREAMING_SNAKE_CASE : List[Any] = corpus_without_punctuation.split("\n" )
__SCREAMING_SNAKE_CASE : List[Any] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_SCREAMING_SNAKE_CASE ))
def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("log10(0) is undefined." )
        return round(1 + log10(n / (1 + df)), 3)
if df == 0:
raise ZeroDivisionError("df must be > 0" )
elif n == 0:
raise ValueError("log10(0) is undefined." )
    return round(log10(n / df), 3)
def tf_idf(tf: int, idf: int) -> float:
"""simple docstring"""
return round(tf * idf , 3 )
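

# Worked example (assumed): a term appearing in 1 of 3 documents gives
# idf = round(log10(3 / 1), 3) = 0.477; with tf = 2, tf_idf(2, 0.477) = 0.954.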
| 211 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values in [0, vocab_size)."""
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output
def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
lowerCamelCase =False
lowerCamelCase =max_length
lowerCamelCase =0
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase =getattr(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase =pt_model_class(UpperCAmelCase_ ).eval()
lowerCamelCase =load_flax_weights_in_pytorch_model(UpperCAmelCase_ , flax_model.params )
lowerCamelCase =flax_model.generate(UpperCAmelCase_ ).sequences
lowerCamelCase =pt_model.generate(torch.tensor(UpperCAmelCase_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
lowerCamelCase =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
    def test_greedy_generate(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
lowerCamelCase =False
lowerCamelCase =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
lowerCamelCase =jit(model.generate )
lowerCamelCase =jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_sample_generate(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
lowerCamelCase =True
lowerCamelCase =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
lowerCamelCase =jit(model.generate )
lowerCamelCase =jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
lowerCamelCase =False
lowerCamelCase =max_length
lowerCamelCase =2
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
lowerCamelCase =jit(model.generate )
lowerCamelCase =jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_num_return_sequences(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
lowerCamelCase =False
lowerCamelCase =max_length
lowerCamelCase =2
lowerCamelCase =2
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
    def test_sample_generate_logits_warper(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
lowerCamelCase =True
lowerCamelCase =max_length
lowerCamelCase =0.8
lowerCamelCase =10
lowerCamelCase =0.3
lowerCamelCase =1
lowerCamelCase =8
lowerCamelCase =9
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
lowerCamelCase =jit(model.generate )
lowerCamelCase =jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_greedy_generate_logits_warper(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
lowerCamelCase =max_length
lowerCamelCase =1
lowerCamelCase =8
lowerCamelCase =9
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
lowerCamelCase =jit(model.generate )
lowerCamelCase =jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_logits_warper(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
lowerCamelCase =max_length
lowerCamelCase =2
lowerCamelCase =1
lowerCamelCase =8
lowerCamelCase =9
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
lowerCamelCase =jit(model.generate )
lowerCamelCase =jit_generate(UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_greedy_generate_attn_mask(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase =attention_mask.at[(0, 0)].set(0 )
lowerCamelCase =False
lowerCamelCase =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
lowerCamelCase =jit(model.generate )
lowerCamelCase =jit_generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_sample_generate_attn_mask(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase =attention_mask.at[(0, 0)].set(0 )
lowerCamelCase =True
lowerCamelCase =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
lowerCamelCase =jit(model.generate )
lowerCamelCase =jit_generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
    def test_beam_search_generate_attn_mask(self):
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase =self._get_input_ids_and_config()
# pad attention mask on the left
lowerCamelCase =attention_mask.at[(0, 0)].set(0 )
lowerCamelCase =2
lowerCamelCase =max_length
for model_class in self.all_generative_model_classes:
lowerCamelCase =model_class(UpperCAmelCase_ )
lowerCamelCase =model.generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , UpperCAmelCase_ )
lowerCamelCase =jit(model.generate )
lowerCamelCase =jit_generate(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 269 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDEN_SIZE_MAPPING = {
'''169M''': 7_68,
'''430M''': 10_24,
'''1B5''': 20_48,
'''3B''': 25_60,
'''7B''': 40_96,
'''14B''': 51_20,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_key and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
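

# Example of the renaming above (assumed illustration): a checkpoint key like
# "blocks.0.att.time_mix_k" becomes "rwkv.blocks.0.attention.time_mix_key".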
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")
    config = RwkvConfig(
        vocab_size=vocab_size, num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size], hidden_size=HIDEN_SIZE_MAPPING[size], )
    config.save_pretrained(output_dir)
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)
    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))
    if index is not None:
        index_path = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_path, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))
        del state_dict
        gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
UpperCAmelCase__ : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
UpperCAmelCase__ : List[Any] =parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 269 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
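

# Quick arithmetic note (assumed example): with batch_size=16 and
# --gradient_accumulation_steps=4, each optimizer step sees an effective batch of
# 16 * 4 = 64 samples per process, without the memory cost of batch size 64.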
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 671 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in meters between two (lat, lon) points, using
    reduced latitudes to correct for the Earth's flattening."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
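

# Sanity check (assumed): along the equator the reduced latitudes are zero, so a
# quarter turn in longitude, haversine_distance(0, 0, 0, 90), comes out to
# 2 * RADIUS * asin(sqrt(0.5)) = RADIUS * pi / 2, roughly 1.002e7 meters.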
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 1 |
"""simple docstring"""
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of `data` around `pivot`."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    """Return the element that would sit at `items[index]` if `items` were sorted."""
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
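

# Example (assumed): the median of an odd-length list is the middle order
# statistic, so quick_select([5, 4, 3, 2, 1], 2) always returns 3 regardless of
# the random pivot choices.
assert quick_select([5, 4, 3, 2, 1], 2) == 3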
| 714 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 31 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : List[Any]=2_24 , SCREAMING_SNAKE_CASE__ : str=1.0 , SCREAMING_SNAKE_CASE__ : str=8 , SCREAMING_SNAKE_CASE__ : Any=8 , SCREAMING_SNAKE_CASE__ : Optional[int]=6 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=32 , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]="relu6" , SCREAMING_SNAKE_CASE__ : List[Any]=True , SCREAMING_SNAKE_CASE__ : str=0.8 , SCREAMING_SNAKE_CASE__ : int=0.02 , SCREAMING_SNAKE_CASE__ : Tuple=0.0_01 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_55 , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
super().__init__(**SCREAMING_SNAKE_CASE__ )
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than zero.' )
lowerCamelCase__ = num_channels
lowerCamelCase__ = image_size
lowerCamelCase__ = depth_multiplier
lowerCamelCase__ = depth_divisible_by
lowerCamelCase__ = min_depth
lowerCamelCase__ = expand_ratio
lowerCamelCase__ = output_stride
lowerCamelCase__ = first_layer_is_expansion
lowerCamelCase__ = finegrained_output
lowerCamelCase__ = hidden_act
lowerCamelCase__ = tf_padding
lowerCamelCase__ = classifier_dropout_prob
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
    def atol_for_validation(self) -> float:
return 1e-4
| 510 |
'''simple docstring'''
from __future__ import annotations
def all_elements_distinct(collection: list) -> bool:
    """Return True if every element of `collection` appears exactly once."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 517 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # image encoding components
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        torch.manual_seed(0)
        image_encoder = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) )
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # image encoding components
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder.eval(),
            # image noising components
            "image_normalizer": image_normalizer.eval(),
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder.eval(),
            "unet": unet.eval(),
            "scheduler": scheduler,
            "vae": vae.eval(),
        }
        return components
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
    @skip_mps
    def test_image_embeds_none(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableUnCLIPImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs.update({"image_embeds": None})
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        # NOTE: the flag value was obfuscated in the source; False is assumed.
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False)
@slow
@require_torch_gpu
class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip_l_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy")

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_h_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy")

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe(input_image, "anime turtle", generator=generator, output_type="np")

        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")

        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
            "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            input_image, "anime turtle", num_inference_steps=2, output_type="np", )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
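# Hedged usage sketch (illustrative only: needs a CUDA machine and downloads
# the public checkpoint already named in the integration tests above):
if __name__ == "__main__" and torch.cuda.is_available():
    pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
        "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16)
    pipe.enable_sequential_cpu_offload()
    init_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png")
    image = pipe(init_image, "anime turtle", num_inference_steps=20).images[0]
    image.save("stable_unclip_img2img_turtle.png")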
| 674 | """simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8, )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
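# Hedged sketch of the same evaluation outside unittest; the pair and the
# four-sentence slice are illustrative, and `calculate_bleu` is the local
# utils helper imported above.
if __name__ == "__main__":
    mname = "facebook/wmt19-en-de"
    tokenizer = FSMTTokenizer.from_pretrained(mname)
    model = FSMTForConditionalGeneration.from_pretrained(mname)
    batch = tokenizer(bleu_data["en-de"]["src"][:4], return_tensors="pt", padding="longest")
    outputs = model.generate(input_ids=batch.input_ids, num_beams=8)
    decoded = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    print(calculate_bleu(decoded, bleu_data["en-de"]["tgt"][:4]))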
| 674 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class GPTNeoXModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        # NOTE: the `is_decoder` value was lost to obfuscation in the source;
        # False is assumed here (the decoder variant is enabled separately below).
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, )
    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()

        config.is_decoder = True

        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, token_labels):
        config.num_labels = self.num_labels
        model = GPTNeoXForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
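    # Hedged illustration of the cache-consistency property verified above:
    # with use_cache=True, decoding from past_key_values must reproduce the
    # logits of a single full forward pass, e.g.
    #
    #   full = model(torch.cat([input_ids, next_tokens], dim=-1)).logits[:, -1]
    #   step = model(next_tokens, past_key_values=past_key_values).logits[:, -1]
    #   torch.testing.assert_close(step, full, atol=1e-3, rtol=0)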
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE: the four flag names below were obfuscated in the source; the
    # standard flags from the upstream GPT-NeoX test suite are assumed.
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXConfig, hidden_size=64, num_attention_heads=8)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    def test_model_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_model_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @unittest.skip(reason="Feed forward chunking is not implemented")
    def test_feed_forward_chunking(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
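    # Hedged sketch of turning RoPE scaling on for a real config, using the
    # same dict format as the test above:
    #
    #   config.rope_scaling = {"type": "dynamic", "factor": 10.0}
    #   model = GPTNeoXModel(config)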
@require_torch
class GPTNeoXLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_gptneox(self):
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped")

            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)

            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"

            output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]

            self.assertEqual(output_str, expected_output) | 8 | import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
A = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor)

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
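    # Hedged sketch of driving the pipeline directly, mirroring the slow tests
    # below (checkpoint name taken from those tests; requires pytesseract):
    #
    #   dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    #   dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)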
@require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _snake_case ( self : Optional[int] ) -> List[Any]:
_lowerCamelCase = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.9944, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0009, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def _snake_case ( self : int ) -> Optional[Any]:
_lowerCamelCase = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=5_0 , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.9974, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
{'score': 0.9948, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : int ) -> List[Any]:
_lowerCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=snake_case__ )
_lowerCamelCase = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=snake_case__ , revision='3dc6de3' , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
_lowerCamelCase = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
]
]
* 2 , )
_lowerCamelCase = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , '' ) ) )
# This model should also work if `image` is set to None
_lowerCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.4251, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.0819, 'answer': '1110212019', 'start': 2_3, 'end': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _snake_case ( self : Dict ) -> List[str]:
_lowerCamelCase = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=snake_case__ )
_lowerCamelCase = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=snake_case__ , revision='3dc6de3' , max_seq_len=5_0 , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
_lowerCamelCase = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
[
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
]
]
* 2 , )
_lowerCamelCase = list(zip(*apply_tesseract(load_image(snake_case__ ) , snake_case__ , '' ) ) )
# This model should also work if `image` is set to None
_lowerCamelCase = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(snake_case__ , decimals=4 ) , [
{'score': 0.9999, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
{'score': 0.9998, 'answer': 'us-001', 'start': 1_6, 'end': 1_6},
] , )
@slow
@require_torch
def _snake_case ( self : Dict ) -> int:
_lowerCamelCase = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
_lowerCamelCase = INVOICE_URL
_lowerCamelCase = 'What is the invoice number?'
_lowerCamelCase = dqa_pipeline(image=snake_case__ , question=snake_case__ , top_k=2 )
self.assertEqual(nested_simplify(snake_case__ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def _snake_case ( self : Optional[Any] ) -> Any:
pass | 544 | 0 |
def multiplicative_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """Return how many times the digits of `num` must be summed before a
    single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
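# Worked example: 39 -> 3*9 = 27 -> 2*7 = 14 -> 1*4 = 4 takes three steps, so
# multiplicative_persistence(39) == 3, while 39 -> 3+9 = 12 -> 1+2 = 3 takes
# two, so additive_persistence(39) == 2.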
if __name__ == "__main__":
import doctest
doctest.testmod() | 298 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a : int = logging.get_logger(__name__)
__a : Tuple = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a : Optional[Any] = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__a : Any = {'''facebook/blenderbot_small-90M''': 5_1_2}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs, ) -> None:
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]

        # normalize punctuation and whitespace before splitting into words
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file | 298 | 1 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1

        num += 1
        den = 10

    return solutions


def solution(max_digits: int = 2) -> int:
    """Return the denominator, in lowest terms, of the product of all
    digit-cancelling fractions."""
    result = 1.0
    for fraction in fraction_list(max_digits):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
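# Worked example: 49/98 is digit-cancelling because striking the 9s leaves
# 4/8 == 49/98. The four such two-digit fractions (16/64, 19/95, 26/65, 49/98)
# multiply to 1/100, so solution() returns 100 (Project Euler 33).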
if __name__ == "__main__":
print(solution())
| 167 | from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowercase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    # NOTE: the class name was obfuscated in the source; "CLIPImageProcessor"
    # is assumed from the OPENAI_CLIP_MEAN/STD defaults used below.
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
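# Hedged usage sketch (the class above reproduces CLIPImageProcessor's
# interface; the 1x3x224x224 output shape follows from the size/crop defaults
# in __init__):
#
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.open("cat.png"), return_tensors="pt")
#   batch["pixel_values"].shape  # torch.Size([1, 3, 224, 224])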
| 167 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        # NOTE: the three boolean flags were obfuscated in the source; False is
        # assumed for each, the usual DDIM configuration in these tests.
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
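    # Hedged sketch of the prior -> controlnet handoff that the integration
    # test below exercises end to end:
    #
    #   image_embeds, negative_embeds = pipe_prior(prompt).to_tuple()
    #   image = pipe(image_embeds=image_embeds,
    #                negative_image_embeds=negative_embeds,
    #                hint=depth_hint).images[0]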
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, output_type="np", )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image) | 628 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE_ = 8
# DPR tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def test_custom_hf_index_retriever_call_with_ctx_encoder_tokenizer(self):
        dpr_ctx_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(dpr_ctx_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc-token-related keys in the dictionary.
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: take items in decreasing profit/weight order."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be the same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) and i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this item as used

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight, i.e.
            # 1 * profit[index], since weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the limit, take only
            # the required fraction of the remaining kgs and calculate its profit:
            # (weight remaining / weight[index]) * profit[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
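

# A minimal sanity check (hypothetical sample values, not from the original
# file): the profit/weight ratios are 10/3, 20/4 and 30/5, so the greedy pass
# takes the whole 5 kg item (profit 30) plus 1/4 of the 4 kg item, i.e. 35.0.
assert calc_profit([10, 20, 30], [3, 4, 5], 6) == 35.0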
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 362 |
from math import pow, sqrt
# Graham's law of effusion: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1).
# The distinct parameter names below are reconstructed; the mangled source had
# collapsed them into a single name.
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
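

# A worked example (hypothetical inputs; the argument order above is the
# reconstructed one, with the ratio defined as sqrt(M2 / M1)): hydrogen
# (~2.016 g/mol) effuses about four times faster than oxygen (~31.999 g/mol).
if __name__ == "__main__":
    print(effusion_ratio(2.016, 31.999))  # ~3.984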
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
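

# Sanity note (expected behavior, not from the original file): both qubits are
# flipped to |1> before measurement, so all 1000 shots should land in the "11"
# bin, i.e. counts == {"11": 1000}.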
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
| 714 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
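    # With the lazy module installed in sys.modules, an import such as
    # `from transformers.models.m2m_100 import M2M100ForConditionalGeneration`
    # only pulls in the heavy torch-dependent submodules on first access.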
| 190 | 0 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(n: int = 200) -> int:
    return two_pound(n)
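

# Known result for Project Euler 31 (stated here as a sanity note, not part of
# the original file): there are 73682 ways to make £2 (200 pence) from the
# eight UK coin denominations, i.e. solution(200) == 73682.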
if __name__ == "__main__":
print(solution(int(input().strip())))
| 145 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
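

# These helpers shrink a full mask image down to a short, order-stable
# fingerprint (an md5 prefix plus the array shape) so the tests below can
# compare dozens of masks without embedding the raw pixel data.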
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip("Image segmentation not implemented in TF" )
    def test_small_model_tf(self):
        pass
@slow
@require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0_2_1},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0_0_5_3},
{"mask": {"hash": "e2d0b7a0b7", "shape": (4_80, 6_40)}, "scores": 0.9_9_6_7},
{"mask": {"hash": "453c7844bd", "shape": (4_80, 6_40)}, "scores": 0.9_9_3},
{"mask": {"hash": "3d44f2926d", "shape": (4_80, 6_40)}, "scores": 0.9_9_0_9},
{"mask": {"hash": "64033ddc3f", "shape": (4_80, 6_40)}, "scores": 0.9_8_7_9},
{"mask": {"hash": "801064ff79", "shape": (4_80, 6_40)}, "scores": 0.9_8_3_4},
{"mask": {"hash": "6172f276ef", "shape": (4_80, 6_40)}, "scores": 0.9_7_1_6},
{"mask": {"hash": "b49e60e084", "shape": (4_80, 6_40)}, "scores": 0.9_6_1_2},
{"mask": {"hash": "a811e775fd", "shape": (4_80, 6_40)}, "scores": 0.9_5_9_9},
{"mask": {"hash": "a6a8ebcf4b", "shape": (4_80, 6_40)}, "scores": 0.9_5_5_2},
{"mask": {"hash": "9d8257e080", "shape": (4_80, 6_40)}, "scores": 0.9_5_3_2},
{"mask": {"hash": "32de6454a8", "shape": (4_80, 6_40)}, "scores": 0.9_5_1_6},
{"mask": {"hash": "af3d4af2c8", "shape": (4_80, 6_40)}, "scores": 0.9_4_9_9},
{"mask": {"hash": "3c6db475fb", "shape": (4_80, 6_40)}, "scores": 0.9_4_8_3},
{"mask": {"hash": "c290813fb9", "shape": (4_80, 6_40)}, "scores": 0.9_4_6_4},
{"mask": {"hash": "b6f0b8f606", "shape": (4_80, 6_40)}, "scores": 0.9_4_3},
{"mask": {"hash": "92ce16bfdf", "shape": (4_80, 6_40)}, "scores": 0.9_4_3},
{"mask": {"hash": "c749b25868", "shape": (4_80, 6_40)}, "scores": 0.9_4_0_8},
{"mask": {"hash": "efb6cab859", "shape": (4_80, 6_40)}, "scores": 0.9_3_3_5},
{"mask": {"hash": "1ff2eafb30", "shape": (4_80, 6_40)}, "scores": 0.9_3_2_6},
{"mask": {"hash": "788b798e24", "shape": (4_80, 6_40)}, "scores": 0.9_2_6_2},
{"mask": {"hash": "abea804f0e", "shape": (4_80, 6_40)}, "scores": 0.8_9_9_9},
{"mask": {"hash": "7b9e8ddb73", "shape": (4_80, 6_40)}, "scores": 0.8_9_8_6},
{"mask": {"hash": "cd24047c8a", "shape": (4_80, 6_40)}, "scores": 0.8_9_8_4},
{"mask": {"hash": "6943e6bcbd", "shape": (4_80, 6_40)}, "scores": 0.8_8_7_3},
{"mask": {"hash": "b5f47c9191", "shape": (4_80, 6_40)}, "scores": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"mask": {"hash": "115ad19f5f", "shape": (4_80, 6_40)}, "scores": 1.0_4_4_4},
{"mask": {"hash": "6affa964c6", "shape": (4_80, 6_40)}, "scores": 1.0_2_1_0},
{"mask": {"hash": "dfe28a0388", "shape": (4_80, 6_40)}, "scores": 1.0_1_6_7},
{"mask": {"hash": "c0a5f4a318", "shape": (4_80, 6_40)}, "scores": 1.0_1_3_2},
{"mask": {"hash": "fe8065c197", "shape": (4_80, 6_40)}, "scores": 1.0_0_5_3},
] , )
| 595 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
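

# A minimal usage sketch (hypothetical stand-in denoiser, not part of this
# module): mirrors how a pipeline drives the scheduler -- perturb the sample,
# evaluate the model, then take the Euler step. A real caller would also apply
# `step_correct` whenever sigma_prev != 0 for the second-order update.
def _example_sampling_loop() -> torch.FloatTensor:
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(10)
    model = lambda x, sigma: torch.zeros_like(x)  # hypothetical denoiser
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        model_output = model(sample_hat, sigma_hat)
        sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
    return sample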
| 675 |
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            encrypted += dict2[x]
    return encrypted


def original_text(cipher_txt: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
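
# Round-trip note (ciphertext computed by hand, shown for illustration only):
# with the key "SECRET", "THE GERMAN ATTACK" should encrypt to
# "BDC PAYUWL JPAIYI", and original_text(cipher_text(msg, key_new), key_new)
# always restores msg.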
| 675 | 1 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """Parse CLI args, generate outputs, and (optionally) score them against references."""
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True) | 613 | import gc
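    # Example invocation for summarization (illustrative paths and flags):
    #   python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_gens.txt \
    #       --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization --bs 32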
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import os
def solution(filename: str = "input.txt") -> int:
    """
    Return the minimal path sum in the matrix read from `filename`, moving only
    right, up and down (Project Euler 82).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
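

# Sanity note (from the Project Euler 82 problem statement, not the original
# file): for the 5x5 example matrix given there, the minimal path sum is 994.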
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 85 | 1 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    """Hash map with open addressing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 41 |
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 40 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]=False ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Optional[int] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
SCREAMING_SNAKE_CASE__ :str = 'segformer.encoder.' + key
if key.startswith('backbone' ):
SCREAMING_SNAKE_CASE__ :List[str] = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
SCREAMING_SNAKE_CASE__ :int = key[key.find('patch_embed' ) + len('patch_embed' )]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(UpperCAmelCase__ )-1}''' )
if "norm" in key:
SCREAMING_SNAKE_CASE__ :Tuple = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
SCREAMING_SNAKE_CASE__ :Dict = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
SCREAMING_SNAKE_CASE__ :List[Any] = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(UpperCAmelCase__ )-1}''' )
if "layer_norm1" in key:
SCREAMING_SNAKE_CASE__ :List[Any] = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
SCREAMING_SNAKE_CASE__ :Dict = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
SCREAMING_SNAKE_CASE__ :str = key[key.find('block' ) + len('block' )]
SCREAMING_SNAKE_CASE__ :Tuple = key.replace(F'''block{idx}''' , F'''block.{int(UpperCAmelCase__ )-1}''' )
if "attn.q" in key:
SCREAMING_SNAKE_CASE__ :Tuple = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
SCREAMING_SNAKE_CASE__ :List[str] = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
SCREAMING_SNAKE_CASE__ :Tuple = key.replace('attn' , 'attention.self' )
if "fc1" in key:
SCREAMING_SNAKE_CASE__ :Tuple = key.replace('fc1' , 'dense1' )
if "fc2" in key:
SCREAMING_SNAKE_CASE__ :int = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
SCREAMING_SNAKE_CASE__ :List[Any] = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
SCREAMING_SNAKE_CASE__ :Optional[Any] = key.replace('linear_fuse.conv' , 'linear_fuse' )
SCREAMING_SNAKE_CASE__ :Optional[int] = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
SCREAMING_SNAKE_CASE__ :List[Any] = key[key.find('linear_c' ) + len('linear_c' )]
SCREAMING_SNAKE_CASE__ :str = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(UpperCAmelCase__ )-1}''' )
if key.startswith('head' ):
SCREAMING_SNAKE_CASE__ :List[str] = key.replace('head' , 'classifier' )
SCREAMING_SNAKE_CASE__ :List[Any] = value
return new_state_dict
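# --- Hedged sketch (illustration only, not used by the converter above): the
# 1-based -> 0-based index shift applied when renaming keys such as
# "patch_embed1" to "patch_embeddings.0".
import re as _re

def shift_index(key: str) -> str:
    match = _re.fullmatch(r"patch_embed(\d+)", key)
    if match is None:
        return key
    return f"patch_embeddings.{int(match.group(1)) - 1}"

assert shift_index("patch_embed1") == "patch_embeddings.0"
assert shift_index("patch_embed4") == "patch_embeddings.3"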
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Any ) -> Tuple:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
SCREAMING_SNAKE_CASE__ :List[Any] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
SCREAMING_SNAKE_CASE__ :Dict = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ :Tuple = kv_weight[
: config.hidden_sizes[i], :
]
SCREAMING_SNAKE_CASE__ :Tuple = kv_bias[: config.hidden_sizes[i]]
SCREAMING_SNAKE_CASE__ :Optional[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
SCREAMING_SNAKE_CASE__ :Optional[int] = kv_bias[
config.hidden_sizes[i] :
]
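# --- Hedged sketch (illustration only): the general pattern used above for
# splitting a fused key/value projection. The 2*hidden_size stacking along dim 0
# is an assumption for illustration.
import torch as _torch

def split_fused_kv(kv_weight, kv_bias, hidden_size):
    # The fused matrix stacks K on top of V along dim 0: shape (2*hidden_size, in_dim).
    k_weight, v_weight = kv_weight[:hidden_size, :], kv_weight[hidden_size:, :]
    k_bias, v_bias = kv_bias[:hidden_size], kv_bias[hidden_size:]
    return (k_weight, k_bias), (v_weight, v_bias)

(_k, _kb), (_v, _vb) = split_fused_kv(_torch.randn(8, 4), _torch.randn(8), 4)
assert _k.shape == (4, 4) and _v.shape == (4, 4)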
def lowerCamelCase ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE__ :Any = Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw )
return image
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Dict = SegformerConfig()
SCREAMING_SNAKE_CASE__ :Tuple = False
# set attributes based on model_name
SCREAMING_SNAKE_CASE__ :Any = 'huggingface/label-files'
if "segformer" in model_name:
SCREAMING_SNAKE_CASE__ :Tuple = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
SCREAMING_SNAKE_CASE__ :List[str] = 1_5_0
SCREAMING_SNAKE_CASE__ :Dict = 'ade20k-id2label.json'
SCREAMING_SNAKE_CASE__ :List[str] = (1, 1_5_0, 1_2_8, 1_2_8)
elif "city" in model_name:
SCREAMING_SNAKE_CASE__ :Tuple = 1_9
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 'cityscapes-id2label.json'
SCREAMING_SNAKE_CASE__ :Optional[int] = (1, 1_9, 1_2_8, 1_2_8)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
SCREAMING_SNAKE_CASE__ :str = True
SCREAMING_SNAKE_CASE__ :Optional[int] = model_name[4:6]
SCREAMING_SNAKE_CASE__ :Tuple = 1_0_0_0
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ :Optional[Any] = (1, 1_0_0_0)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
SCREAMING_SNAKE_CASE__ :Dict = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ :Union[str, Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :Optional[Any] = idalabel
SCREAMING_SNAKE_CASE__ :List[Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
SCREAMING_SNAKE_CASE__ :Optional[int] = [6_4, 1_2_8, 3_2_0, 5_1_2]
SCREAMING_SNAKE_CASE__ :Any = 2_5_6
elif size == "b2":
SCREAMING_SNAKE_CASE__ :List[str] = [6_4, 1_2_8, 3_2_0, 5_1_2]
SCREAMING_SNAKE_CASE__ :Any = 7_6_8
SCREAMING_SNAKE_CASE__ :Optional[int] = [3, 4, 6, 3]
elif size == "b3":
SCREAMING_SNAKE_CASE__ :List[str] = [6_4, 1_2_8, 3_2_0, 5_1_2]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 7_6_8
SCREAMING_SNAKE_CASE__ :Optional[int] = [3, 4, 1_8, 3]
elif size == "b4":
SCREAMING_SNAKE_CASE__ :Tuple = [6_4, 1_2_8, 3_2_0, 5_1_2]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 7_6_8
SCREAMING_SNAKE_CASE__ :Any = [3, 8, 2_7, 3]
elif size == "b5":
SCREAMING_SNAKE_CASE__ :Union[str, Any] = [6_4, 1_2_8, 3_2_0, 5_1_2]
SCREAMING_SNAKE_CASE__ :Union[str, Any] = 7_6_8
SCREAMING_SNAKE_CASE__ :List[str] = [3, 6, 4_0, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
SCREAMING_SNAKE_CASE__ :int = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=UpperCAmelCase__ , align=UpperCAmelCase__ , do_random_crop=UpperCAmelCase__ )
# prepare image
SCREAMING_SNAKE_CASE__ :Dict = prepare_img()
SCREAMING_SNAKE_CASE__ :List[Any] = image_processor(images=UpperCAmelCase__ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
SCREAMING_SNAKE_CASE__ :str = torch.load(UpperCAmelCase__ , map_location=torch.device('cpu' ) )
else:
SCREAMING_SNAKE_CASE__ :str = torch.load(UpperCAmelCase__ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
SCREAMING_SNAKE_CASE__ :Tuple = rename_keys(UpperCAmelCase__ , encoder_only=UpperCAmelCase__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(UpperCAmelCase__ , UpperCAmelCase__ )
# create HuggingFace model and load state dict
if encoder_only:
SCREAMING_SNAKE_CASE__ :Any = False
SCREAMING_SNAKE_CASE__ :int = SegformerForImageClassification(UpperCAmelCase__ )
else:
SCREAMING_SNAKE_CASE__ :int = SegformerForSemanticSegmentation(UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
# forward pass
SCREAMING_SNAKE_CASE__ :Optional[Any] = model(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :Optional[Any] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ :Dict = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ :Any = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ :List[Any] = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
SCREAMING_SNAKE_CASE__ :Optional[Any] = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
SCREAMING_SNAKE_CASE__ :Tuple = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ :List[Any] = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
SCREAMING_SNAKE_CASE__ :List[Any] = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
SCREAMING_SNAKE_CASE__ :Dict = torch.tensor(
[
[
[-1.1_372e01, -1.2_787e01, -1.3_477e01],
[-1.2_536e01, -1.4_194e01, -1.4_409e01],
[-1.3_217e01, -1.4_888e01, -1.5_327e01],
],
[
[-1.4_791e01, -1.7_122e01, -1.8_277e01],
[-1.7_163e01, -1.9_192e01, -1.9_533e01],
[-1.7_897e01, -1.9_991e01, -2.0_315e01],
],
[
[7.6_723e-01, 4.1_921e-01, -7.7_878e-02],
[4.7_772e-01, 9.5_557e-03, -2.8_082e-01],
[3.6_032e-01, -2.4_826e-01, -5.1_168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
SCREAMING_SNAKE_CASE__ :Optional[Any] = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ :Optional[int] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ :List[str] = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ :List[str] = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ :int = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
SCREAMING_SNAKE_CASE__ :int = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
SCREAMING_SNAKE_CASE__ :Optional[int] = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[SCREAMING_SNAKE_CASE__] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
model.save_pretrained(UpperCAmelCase__ )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
UpperCamelCase_ = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
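# --- Hedged usage sketch (script name and paths are hypothetical):
# python convert_segformer.py \
#     --model_name segformer.b0.512x512.ade.160k \
#     --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#     --pytorch_dump_folder_path ./segformer-b0-ade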
| 320 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ):
A_ : List[str] = 'roberta'
def __init__( self : Union[str, Any] , UpperCamelCase_ : Dict=5_02_65 , UpperCamelCase_ : List[Any]=7_68 , UpperCamelCase_ : List[Any]=12 , UpperCamelCase_ : int=12 , UpperCamelCase_ : str=30_72 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Any=5_12 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : List[str]=1e-12 , UpperCamelCase_ : Optional[int]=1 , UpperCamelCase_ : str=0 , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : Optional[Any]="absolute" , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Any , ) -> List[str]:
super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ :List[Any] = vocab_size
SCREAMING_SNAKE_CASE__ :List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ :str = num_hidden_layers
SCREAMING_SNAKE_CASE__ :Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ :Any = hidden_act
SCREAMING_SNAKE_CASE__ :List[str] = intermediate_size
SCREAMING_SNAKE_CASE__ :List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ :Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ :List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ :Optional[Any] = type_vocab_size
SCREAMING_SNAKE_CASE__ :Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ :Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE__ :List[Any] = position_embedding_type
SCREAMING_SNAKE_CASE__ :int = use_cache
SCREAMING_SNAKE_CASE__ :Dict = classifier_dropout
class _SCREAMING_SNAKE_CASE( _SCREAMING_SNAKE_CASE ):
@property
def __lowerCamelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ :Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE__ :List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
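# --- Hedged sketch (illustration only): a mapping like the one returned by
# `inputs` above is what `torch.onnx.export` consumes through its `dynamic_axes`
# argument. The toy model and output file name are hypothetical.
import torch

dynamic_axis = {0: 'batch', 1: 'sequence'}
model = torch.nn.Embedding(10, 4)  # stand-in for a real text encoder
example_input = torch.zeros(1, 8, dtype=torch.long)
torch.onnx.export(
    model,
    (example_input,),
    'toy.onnx',
    input_names=['input_ids'],
    dynamic_axes={'input_ids': dynamic_axis},
)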
| 320 | 1 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[int] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
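# --- Hedged usage sketch (script name and paths are hypothetical; the flags are
# the ones declared above):
# python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./sd-v1-5.ckpt \
#     --original_config_file ./v1-inference.yaml \
#     --dump_path ./sd-converted \
#     --half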
| 79 |
'''simple docstring'''
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __SCREAMING_SNAKE_CASE (__A ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
super().__init__(
UpperCamelCase__ , split=UpperCamelCase__ , features=UpperCamelCase__ , cache_dir=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ , streaming=UpperCamelCase__ , num_proc=UpperCamelCase__ , **UpperCamelCase__ , )
a_ = field
a_ = path_or_paths if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else {self.split: path_or_paths}
a_ = Json(
cache_dir=UpperCamelCase__ , data_files=UpperCamelCase__ , features=UpperCamelCase__ , field=UpperCamelCase__ , **UpperCamelCase__ , )
def _a ( self ):
"""simple docstring"""
if self.streaming:
a_ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
a_ = None
a_ = None
a_ = None
a_ = None
self.builder.download_and_prepare(
download_config=UpperCamelCase__ , download_mode=UpperCamelCase__ , verification_mode=UpperCamelCase__ , base_path=UpperCamelCase__ , num_proc=self.num_proc , )
a_ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase__ , in_memory=self.keep_in_memory )
return dataset
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ):
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'num_proc {num_proc} must be an integer > 0.' )
a_ = dataset
a_ = path_or_buf
a_ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
a_ = num_proc
a_ = 'utf-8'
a_ = to_json_kwargs
def _a ( self ):
"""simple docstring"""
a_ = self.to_json_kwargs.pop('path_or_buf' , UpperCamelCase__ )
a_ = self.to_json_kwargs.pop('orient' , 'records' )
a_ = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
a_ = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
a_ = self.to_json_kwargs.pop('compression' , UpperCamelCase__ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , 'wb' , compression=UpperCamelCase__ ) as buffer:
a_ = self._write(file_obj=UpperCamelCase__ , orient=UpperCamelCase__ , lines=UpperCamelCase__ , index=UpperCamelCase__ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
' was passed. Please provide a local path instead.' )
a_ = self._write(
file_obj=self.path_or_buf , orient=UpperCamelCase__ , lines=UpperCamelCase__ , index=UpperCamelCase__ , **self.to_json_kwargs )
return written
def _a ( self , UpperCamelCase__ ):
"""simple docstring"""
a_ , a_ , a_ , a_ , a_ = args
a_ = query_table(
table=self.dataset.data , key=slice(UpperCamelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , )
a_ = batch.to_pandas().to_json(
path_or_buf=UpperCamelCase__ , orient=UpperCamelCase__ , lines=UpperCamelCase__ , index=UpperCamelCase__ , **UpperCamelCase__ )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def _a ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ):
"""simple docstring"""
a_ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
a_ = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(UpperCamelCase__ )
else:
a_ , a_ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCamelCase__ , UpperCamelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
written += file_obj.write(UpperCamelCase__ )
return written
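# --- Hedged usage sketch (illustration only): the writer above backs the public
# `Dataset.to_json` API; the tiny dataset and output path are hypothetical.
from datasets import Dataset

ds = Dataset.from_dict({'text': ['a', 'b'], 'label': [0, 1]})
ds.to_json('toy.jsonl', lines=True)  # "records" orient, one JSON object per line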
| 536 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowercase ( A, unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = KandinskyVaaInpaintPipeline
_A : Union[str, Any] = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
_A : Any = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
_A : Optional[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_A : List[str] = False
@property
def A_ ( self : Optional[Any] ):
return 32
@property
def A_ ( self : str ):
return 32
@property
def A_ ( self : Dict ):
return self.time_input_dim
@property
def A_ ( self : Tuple ):
return self.time_input_dim * 4
@property
def A_ ( self : Any ):
return 100
@property
def A_ ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase__ = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
UpperCamelCase__ = UNetaDConditionModel(**_a )
return model
@property
def A_ ( self : List[str] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A_ ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCamelCase__ = VQModel(**self.dummy_movq_kwargs )
return model
def A_ ( self : Any ):
UpperCamelCase__ = self.dummy_unet
UpperCamelCase__ = self.dummy_movq
UpperCamelCase__ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
UpperCamelCase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def A_ ( self : List[Any] , _a : str , _a : Dict=0 ):
UpperCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_a ) ).to(_a )
UpperCamelCase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
UpperCamelCase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(_a ) ).to(_a )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase__ = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
UpperCamelCase__ = np.ones((64, 64) , dtype=np.floataa )
UpperCamelCase__ = 0
if str(_a ).startswith('''mps''' ):
UpperCamelCase__ = torch.manual_seed(_a )
else:
UpperCamelCase__ = torch.Generator(device=_a ).manual_seed(_a )
UpperCamelCase__ = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def A_ ( self : int ):
UpperCamelCase__ = '''cpu'''
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**_a )
UpperCamelCase__ = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
UpperCamelCase__ = pipe(**self.get_dummy_inputs(_a ) )
UpperCamelCase__ = output.images
UpperCamelCase__ = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
UpperCamelCase__ = image[0, -3:, -3:, -1]
UpperCamelCase__ = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def A_ ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
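# --- Hedged sketch (illustration only): the binary inpainting mask built in
# `get_dummy_inputs` above starts from all ones; the exact region zeroed out was
# not recoverable here, so the slice below is hypothetical.
import numpy as _np

toy_mask = _np.ones((64, 64), dtype=_np.float32)
toy_mask[:16, :16] = 0.0  # hypothetical region to treat as masked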
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : List[Any] ):
UpperCamelCase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
UpperCamelCase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
UpperCamelCase__ = np.ones((768, 768) , dtype=np.floataa )
UpperCamelCase__ = 0
UpperCamelCase__ = '''a hat'''
UpperCamelCase__ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
UpperCamelCase__ = KandinskyVaaInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.floataa )
UpperCamelCase__ = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
UpperCamelCase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
UpperCamelCase__ , UpperCamelCase__ = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
UpperCamelCase__ = pipeline(
image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
UpperCamelCase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a , _a )
| 715 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase_ ( UpperCamelCase__ : List[str], UpperCamelCase__ : Any ):
'''simple docstring'''
UpperCamelCase__ = torch.load(UpperCamelCase__, map_location='''cpu''' )
UpperCamelCase__ = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
UpperCamelCase__ = {}
for k, v in state_dict.items():
if "pred_layer" in k:
UpperCamelCase__ = v
else:
UpperCamelCase__ = v
UpperCamelCase__ = chkpt['''params''']
UpperCamelCase__ = {n: v for n, v in config.items() if not isinstance(UpperCamelCase__, (torch.FloatTensor, numpy.ndarray) )}
UpperCamelCase__ = chkpt['''dico_word2id''']
UpperCamelCase__ = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''', '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
UpperCamelCase__ = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
UpperCamelCase__ = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
UpperCamelCase__ = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(UpperCamelCase__, UpperCamelCase__ )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(UpperCamelCase__, indent=2 ) + '''\n''' )
print(F"""Save vocab file to {pytorch_config_dump_path}""" )
with open(UpperCamelCase__, '''w''', encoding='''utf-8''' ) as f:
f.write(json.dumps(UpperCamelCase__, indent=2 ) + '''\n''' )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
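# --- Hedged usage sketch (script name and paths are hypothetical):
# python convert_xlm_checkpoint.py \
#     --xlm_checkpoint_path ./mlm_en_2048.pth \
#     --pytorch_dump_folder_path ./xlm-converted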
| 591 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCAmelCase = '\nHuman: <<task>>\n\nAssistant: '
__lowerCAmelCase = 'huggingface-tools/default-prompts'
__lowerCAmelCase = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt ( prompt_or_repo_id , agent_name , mode="run" ) ->str:
'''simple docstring'''
if prompt_or_repo_id is None:
prompt_or_repo_id = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , prompt_or_repo_id ) is not None:
return prompt_or_repo_id
prompt_file = cached_file(
prompt_or_repo_id , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(prompt_file , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
| 201 |
# Function to print upper half of diamond (pyramid)
def floyd ( a ) ->Optional[Any]:
'''simple docstring'''
for i in range(0 , a ):
for _ in range(0 , a - i - 1 ): # printing spaces
print(''' ''' , end='''''' )
for _ in range(0 , i + 1 ): # printing stars
print('''* ''' , end='''''' )
print()
def reverse_floyd ( a ) ->Union[str, Any]:
'''simple docstring'''
for i in range(a , 0 , -1 ):
for _ in range(i , 0 , -1 ): # printing stars
print('''* ''' , end='''''' )
print()
for _ in range(a - i + 1 , 0 , -1 ): # printing spaces
print(''' ''' , end='''''' )
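# --- Worked example (derived from floyd/reverse_floyd above): for n = 3 the
# upper half prints (each '*' is followed by a space)
#     *
#    * *
#   * * *
# and the lower half mirrors it:
#   * * *
#    * *
#     *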
def pretty_print ( a ) ->Optional[int]:
'''simple docstring'''
if a <= 0:
print(''' ... .... nothing printing :(''' )
return
floyd(a ) # upper half
reverse_floyd(a ) # lower half
if __name__ == "__main__":
print(R'| /\ | |- | |- |--| |\ /| |-')
print(R'|/ \| |- |_ |_ |__| | \/ | |_')
__lowerCAmelCase = 1
while K:
__lowerCAmelCase = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
__lowerCAmelCase = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
| 201 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self : str , lowercase__ : Optional[Any] , lowercase__ : Any=13 , lowercase__ : List[str]=7 , lowercase__ : Optional[int]=True , lowercase__ : List[Any]=True , lowercase__ : Dict=True , lowercase__ : str=True , lowercase__ : List[str]=99 , lowercase__ : str=32 , lowercase__ : int=5 , lowercase__ : List[str]=4 , lowercase__ : Optional[int]=37 , lowercase__ : Dict="gelu" , lowercase__ : int=0.1 , lowercase__ : Dict=0.1 , lowercase__ : Any=5_12 , lowercase__ : Any=16 , lowercase__ : Optional[int]=2 , lowercase__ : List[Any]=0.0_2 , lowercase__ : Optional[Any]=4 , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_attention_mask
_lowerCAmelCase = use_token_type_ids
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = num_choices
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = None
if self.use_attention_mask:
_lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase = None
if self.use_token_type_ids:
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = True
_lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCamelCase__ ( UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ =True
UpperCamelCase__ =(
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
_lowerCAmelCase = FlaxBertModelTester(self )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
_lowerCAmelCase = FlaxBertModel.from_pretrained('bert-base-cased' )
_lowerCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowercase__ )
| 712 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase: int = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Any = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: str = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_lowercase: Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
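# --- Hedged sketch (illustration only): the deferred-import idea behind
# `_LazyModule`, reduced to its core. The real class handles far more
# (submodules, __dir__, pickling); this toy only resolves names on first access.
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # {module name: [attribute names]} -> reverse map for lookup
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Import the owning module only when the attribute is first requested.
        module = importlib.import_module(self._attr_to_module[attr])
        return getattr(module, attr)

toy = ToyLazyModule('toy', {'collections': ['OrderedDict']})
assert toy.OrderedDict is __import__('collections').OrderedDict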
| 225 | 0 |
"""simple docstring"""
import os
import sys
import unittest
_lowerCAmelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
_lowerCAmelCase : Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
_lowerCAmelCase : str = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class A_ ( unittest.TestCase ):
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = get_test_to_tester_mapping(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = get_test_to_tester_mapping(__lowerCAmelCase )
_lowerCamelCase : Dict = {"BertModelTest": "BertModelTester"}
_lowerCamelCase : Union[str, Any] = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Any = get_model_to_test_mapping(__lowerCAmelCase )
_lowerCamelCase : Optional[int] = get_model_to_test_mapping(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
_lowerCamelCase : Optional[Any] = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
def _lowercase ( self: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = get_model_to_tester_mapping(__lowerCAmelCase )
_lowerCamelCase : Dict = get_model_to_tester_mapping(__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
_lowerCamelCase : List[str] = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
self.assertEqual(get_test_info.to_json(__lowerCAmelCase ) ,__lowerCAmelCase )
| 46 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False ) -> int:
'''simple docstring'''
_lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""transformer.blocks.{i}.norm1.weight""", F"""vilt.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm1.bias""", F"""vilt.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.weight""", F"""vilt.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""transformer.blocks.{i}.attn.proj.bias""", F"""vilt.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.weight""", F"""vilt.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.norm2.bias""", F"""vilt.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""transformer.blocks.{i}.mlp.fc1.weight""", F"""vilt.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc1.bias""", F"""vilt.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.weight""", F"""vilt.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""transformer.blocks.{i}.mlp.fc2.bias""", F"""vilt.encoder.layer.{i}.output.dense.bias""") )
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
] )
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
] )
else:
pass
return rename_keys
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_lowerCamelCase : Tuple = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.weight""" )
_lowerCamelCase : List[Any] = state_dict.pop(F"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
_lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
_lowerCamelCase : Optional[int] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCamelCase : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :]
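# A minimal, self-contained sketch of the slicing above (the helper name
# `split_fused_qkv` and its arguments are ours, for illustration only): timm
# stores attention as one fused (3 * hidden, hidden) projection, which is cut
# into query / key / value blocks of `hidden` rows each, in that order.
def split_fused_qkv(in_proj_weight, in_proj_bias, hidden):
    query = (in_proj_weight[:hidden, :], in_proj_bias[:hidden])
    key = (in_proj_weight[hidden : hidden * 2, :], in_proj_bias[hidden : hidden * 2])
    value = (in_proj_weight[-hidden:, :], in_proj_bias[-hidden:])
    return query, key, value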
def lowerCamelCase_( _lowerCamelCase ) -> int:
'''simple docstring'''
_lowerCamelCase : Optional[int] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> Any:
'''simple docstring'''
_lowerCamelCase : List[Any] = dct.pop(_lowerCamelCase )
_lowerCamelCase : Optional[int] = val
@torch.no_grad()
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
_lowerCamelCase : int = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=_lowerCamelCase )
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : str = False
if "vqa" in checkpoint_url:
_lowerCamelCase : str = True
_lowerCamelCase : Union[str, Any] = 3129
_lowerCamelCase : str = "huggingface/label-files"
_lowerCamelCase : Optional[Any] = "vqa2-id2label.json"
_lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
        _lowerCamelCase : Any = {int(k ): v for k, v in idalabel.items()}
_lowerCamelCase : Optional[int] = idalabel
_lowerCamelCase : int = {v: k for k, v in idalabel.items()}
_lowerCamelCase : Any = ViltForQuestionAnswering(_lowerCamelCase )
elif "nlvr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[Any] = {0: "False", 1: "True"}
_lowerCamelCase : int = {v: k for k, v in config.idalabel.items()}
_lowerCamelCase : Optional[Any] = 3
_lowerCamelCase : Optional[Any] = ViltForImagesAndTextClassification(_lowerCamelCase )
elif "irtr" in checkpoint_url:
_lowerCamelCase : Tuple = True
_lowerCamelCase : Union[str, Any] = ViltForImageAndTextRetrieval(_lowerCamelCase )
elif "mlm_itm" in checkpoint_url:
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[int] = ViltForMaskedLM(_lowerCamelCase )
else:
raise ValueError("Unknown model type" )
# load state_dict of original model, remove and rename some keys
_lowerCamelCase : List[Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["state_dict"]
_lowerCamelCase : str = create_rename_keys(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase )
if mlm_model or irtr_model:
_lowerCamelCase : Dict = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowerCamelCase, _lowerCamelCase : List[str] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(_lowerCamelCase )
# Define processor
_lowerCamelCase : int = ViltImageProcessor(size=384 )
_lowerCamelCase : Union[str, Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
_lowerCamelCase : Optional[int] = ViltProcessor(_lowerCamelCase , _lowerCamelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCamelCase : int = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : Union[str, Any] = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg" , stream=_lowerCamelCase ).raw )
_lowerCamelCase : str = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
_lowerCamelCase : List[str] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Optional[int] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : int = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowerCamelCase : str = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg" , stream=_lowerCamelCase ).raw )
if mlm_model:
_lowerCamelCase : Any = "a bunch of [MASK] laying on a [MASK]."
else:
_lowerCamelCase : List[str] = "How many cats are there?"
_lowerCamelCase : Union[str, Any] = processor(_lowerCamelCase , _lowerCamelCase , return_tensors="pt" )
_lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase )
# Verify outputs
if mlm_model:
_lowerCamelCase : List[str] = torch.Size([1, 11, 30522] )
_lowerCamelCase : Dict = torch.tensor([-1_2.5_0_6_1, -1_2.5_1_2_3, -1_2.5_1_7_4] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , _lowerCamelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
_lowerCamelCase : List[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCamelCase : List[str] = torch.Size([1, 3129] )
_lowerCamelCase : List[str] = torch.tensor([-1_5.9_4_9_5, -1_8.1_4_7_2, -1_0.3_0_4_1] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
# verify vqa prediction equals "2"
_lowerCamelCase : Union[str, Any] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowerCamelCase : List[str] = torch.Size([1, 2] )
_lowerCamelCase : Optional[Any] = torch.tensor([-2.8_7_2_1, 2.1_2_9_1] )
assert torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 )
assert outputs.logits.shape == expected_shape
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
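# Example invocation (the script filename and output path are illustrative;
# the checkpoint URL is the default declared below):
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm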
if __name__ == "__main__":
_lowerCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Union[str, Any] = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 46 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_CITATION = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n'
_KWARGS_DESCRIPTION = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n    sources: list of source sentences where each sentence should be a string.\n    predictions: list of predicted sentences where each sentence should be a string.\n    references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n    sari: sari score\n    sacrebleu: sacrebleu score\n    exact: exact score\n\nExamples:\n    >>> sources=["About 95 species are currently accepted ."]\n    >>> predictions=["About 95 you now get in ."]\n    >>> references=[["About 95 species are currently known ."]]\n    >>> wiki_split = datasets.load_metric("wiki_split")\n    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n    >>> print(results)\n    {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n'
def normalize_answer ( __A : Optional[int] ) -> int:
def remove_articles(__A : List[str] ):
_UpperCAmelCase : List[str] = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(_UpperCAmelCase , ''' ''' , __A )
def white_space_fix(__A : Dict ):
return " ".join(text.split() )
def remove_punc(__A : Any ):
_UpperCAmelCase : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__A : Optional[Any] ):
        return __A.lower()
return white_space_fix(remove_articles(remove_punc(lower(__A ) ) ) )
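# Worked example of the normalization above, as a self-contained sketch
# (the helper name `_normalize_demo` is ours): lowercase, strip punctuation,
# drop articles, then collapse whitespace, so "The cat sat." and "cat sat"
# normalize to the same string.
def _normalize_demo(text: str) -> str:
    lowered = text.lower()
    no_punct = "".join(ch for ch in lowered if ch not in set(string.punctuation))
    no_articles = re.sub(r"\b(a|an|the)\b", " ", no_punct)
    return " ".join(no_articles.split())
# _normalize_demo("The cat sat.") -> "cat sat"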
def compute_exact ( a_gold : Tuple , a_pred : int ) -> Optional[Any]:
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em ( predictions : str , references : List[Any] ) -> str:
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
def SARIngram ( sgrams : Dict , cgrams : Any , rgramslist : Tuple , numref : Dict ) -> Any:
    rgramall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscorea = 0
    keeptmpscoreb = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscoreb += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscorea / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscoreb / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscorea = 0
    deltmpscoreb = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscoreb += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscorea / len(delgramcounter_rep )
# ADDITION
    addgramcounter = set(cgramcounter ) - set(sgramcounter )
    addgramcountergood = set(addgramcounter ) & set(rgramcounter )
    addgramcounterall = set(rgramcounter ) - set(sgramcounter )
    addtmpscore = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
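# The intuition behind the three scores returned above, sketched on unigram
# sets (illustrative only; the real computation weights n-grams with Counters
# and averages over references):
def _sari_sets_demo(source: str, prediction: str, reference: str):
    s, c, r = set(source.split()), set(prediction.split()), set(reference.split())
    keep = (s & c) & r    # kept by the system and endorsed by the reference
    delete = (s - c) - r  # dropped by the system and also absent from the reference
    add = (c - s) & r     # newly added by the system and present in the reference
    return keep, delete, add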
def SARIsent ( ssent : List[str] , csent : Optional[Any] , rsents : int ) -> Union[str, Any]:
    numref = len(rsents )
    s1grams = ssent.split(''' ''' )
    c1grams = csent.split(''' ''' )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(''' ''' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram )
    (keep1score , del1score , add1score) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keep2score , del2score , add2score) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keep3score , del3score , add3score) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keep4score , del4score , add4score) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score] ) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score] ) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def normalize ( sentence : int , lowercase : bool = True , tokenizer : str = "13a" , return_str : bool = True ) -> List[str]:
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence
    if not return_str:
        normalized_sent = normalized_sent.split()
    return normalized_sent
def compute_sari ( sources : str , predictions : List[Any] , references : Dict ) -> Any:
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
return 100 * sari_score
def compute_sacrebleu ( predictions : Tuple , references : Optional[Any] , smooth_method : Any="exp" , smooth_value : List[str]=None , force : List[str]=False , lowercase : str=False , use_effective_order : List[str]=False , ) -> Tuple:
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions , transformed_references , smooth_method=smooth_method , smooth_value=smooth_value , force=force , lowercase=lowercase , use_effective_order=use_effective_order , )
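    # The comprehension above transposes per-prediction reference lists into
    # per-position reference streams, which is what sacrebleu.corpus_bleu expects,
    # e.g. [["r1a", "r1b"], ["r2a", "r2b"]] -> [["r1a", "r2a"], ["r1b", "r2b"]].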
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
    def _info ( self) -> Any:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''),
}) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
    def _compute ( self , sources , predictions , references) -> Optional[int]:
        """simple docstring"""
        result = {}
        result.update({'''sari''': compute_sari(sources=sources , predictions=predictions , references=references)})
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions , references=references)})
        result.update({'''exact''': compute_em(predictions=predictions , references=references)})
return result
| 186 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results ( __A : Optional[Any] ) -> int:
    expressions = __A.split(''' ''' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
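# Example of the pytest summary format parsed above (illustrative):
#   "1 failed, 2 passed in 91.41s" -> failed=1, success=2, time_spent="91.41s"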
def extract_first_line_failure ( __A : List[str] ) -> int:
    failures = {}
    file = None
    in_error = False
    for line in __A.split('''\n''' ):
        if re.search(r'''_ \[doctest\]''' , line ):
            in_error = True
            file = line.split(''' ''' )[2]
        elif in_error and not line.split(''' ''' )[0].isdigit():
            failures[file] = line
            in_error = False
return failures
class Message :
'''simple docstring'''
    def __init__( self , title , doc_test_results) -> Optional[int]:
        """simple docstring"""
        self.title = title
        self._time_spent = doc_test_results['''time_spent'''].split(''',''')[0]
        self.n_success = doc_test_results['''success''']
        self.n_failures = doc_test_results['''failures''']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time ( self) -> str:
"""simple docstring"""
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(''':''')
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
            hours , minutes , seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f'''{int(hours )}h{int(minutes )}m{int(seconds )}s'''
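    # e.g. "1:02:03" -> "1h2m3s", and a bare "91.41" (under a minute) -> "0h1m31s"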
@property
    def header ( self) -> Dict:
"""simple docstring"""
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures ( self) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def failures ( self) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
f''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
    def category_failures ( self) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[str] = 40
        _UpperCAmelCase : List[str] = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(v , dict )}
_UpperCAmelCase : List[Any] = ''''''
for category, failures in category_failures.items():
if len(_A) == 0:
continue
if report != "":
report += "\n\n"
report += f'''*{category} failures*:'''.ljust(line_length // 2).rjust(line_length // 2) + "\n"
report += "`"
report += "`\n`".join(_A)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
    def payload ( self) -> str:
"""simple docstring"""
_UpperCAmelCase : int = [self.header]
if self.n_failures > 0:
blocks.append(self.failures)
if self.n_failures > 0:
blocks.extend([self.category_failures])
if self.n_failures == 0:
blocks.append(self.no_failures)
        return json.dumps(blocks)
@staticmethod
    def error_out ( ) -> List[str]:
"""simple docstring"""
        payload = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print('''Sending the following payload''')
        print(json.dumps({'''blocks''': payload}))
        client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=payload , )
    def post ( self) -> str:
"""simple docstring"""
print('''Sending the following payload''')
print(json.dumps({'''blocks''': json.loads(self.payload)}))
        text = f'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else '''All tests passed.'''
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=text , )
    def get_reply_blocks ( self , job_name , job_link , failures , text) -> Dict:
"""simple docstring"""
        failures_text = ''''''
for key, value in failures.items():
            value = value[:200] + ''' [Truncated]''' if len(value ) > 250 else value
failures_text += f'''*{key}*\n_{value}_\n\n'''
        title = job_name
        content = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
        if job_link is not None:
            content['''accessory'''] = {
                '''type''': '''button''',
                '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
                '''url''': job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply ( self) -> Union[str, Any]:
"""simple docstring"""
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''')
        job_link = self.doc_test_results.pop('''job_link''')
self.doc_test_results.pop('''failures''')
self.doc_test_results.pop('''success''')
self.doc_test_results.pop('''time_spent''')
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t: t[0])
for job, job_result in sorted_dict:
if len(job_result['''failures''']):
                text = f'''*Num failures* :{len(job_result['failed'])} \n'''
                failures = job_result['''failures''']
                blocks = self.get_reply_blocks(job , job_link , failures , text=text)
print('''Sending the following reply''')
print(json.dumps({'''blocks''': blocks}))
            client.chat_postMessage(
                channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f'''Results for {job}''' , blocks=blocks , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1)
def get_job_links ( ) -> str:
    run_id = os.environ['''GITHUB_RUN_ID''']
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
    result = requests.get(url ).json()
    jobs = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
        pages_to_iterate_over = math.ceil((result['''total_count'''] - 100) / 100 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + f'''&page={i + 2}''' ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , __A )
return {}
def retrieve_artifact ( __A : str ) -> Tuple:
    _artifact = {}
    if os.path.exists(__A ):
        files = os.listdir(__A )
        for file in files:
            try:
                with open(os.path.join(__A , file ) , encoding='''utf-8''' ) as f:
                    _artifact[file.split('''.''' )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f'''Could not open {os.path.join(__A , file )}.''' ) from e
return _artifact
def retrieve_available_artifacts ( ) -> List[str]:
    class Artifact :
        '''simple docstring'''
        def __init__( self , _A) -> int:
            """simple docstring"""
            self.name = _A
            self.paths = []
        def __str__( self) -> int:
            """simple docstring"""
            return self.name
        def add_path ( self , _A) -> int:
            """simple docstring"""
            self.paths.append({'''name''': self.name, '''path''': _A})
    _available_artifacts : Dict[str, Artifact] = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
[
('*.py', 'API Examples'),
('*.md', 'MD Examples'),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
'failed': [],
'failures': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results['job_link'] = github_actions_job_links.get('run_doctests')
    artifact_path = available_artifacts['doc_tests_gpu_test_reports'].paths[0]
    artifact = retrieve_artifact(artifact_path['name'])
    if "stats" in artifact:
        failed , success , time_spent = handle_test_results(artifact['stats'])
        doc_test_results['failures'] = failed
        doc_test_results['success'] = success
        doc_test_results['time_spent'] = time_spent[1:-1] + ', '
    all_failures = extract_first_line_failure(artifact['failures_short'])
for line in artifact["summary_short"].split('\n'):
if re.search('FAILED', line):
            line = line.replace('FAILED ', '')
            line = line.split()[0].replace('\n', '')
if "::" in line:
                file_path , test = line.split('::')
else:
                file_path , test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
                    category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
                    failure = all_failures[test] if test in all_failures else 'N/A'
                    doc_test_results[category]['failures'][test] = failure
break
    message = Message('🤗 Results of the doc tests.', doc_test_results)
message.post()
message.post_reply()
| 186 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_a : Optional[int] = logging.get_logger(__name__)
_a : Optional[Any] = {"""vocab_file""": """vocab.txt"""}
_a : str = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
_a : Optional[int] = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
_a : Tuple = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_INIT_CONFIGURATION
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = ConvBertTokenizer
def __init__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True , _lowerCAmelCase="[UNK]" , _lowerCAmelCase="[SEP]" , _lowerCAmelCase="[PAD]" , _lowerCAmelCase="[CLS]" , _lowerCAmelCase="[MASK]" , _lowerCAmelCase=True , _lowerCAmelCase=None , **_lowerCAmelCase , ):
'''simple docstring'''
super().__init__(
_lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , _lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _lowerCAmelCase ) != tokenize_chinese_chars
):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=None ):
'''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
'''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
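    # e.g. for a pair ("hello", "world"), assuming one token per word, this yields
    # [CLS] hello [SEP] world [SEP] -> token_type_ids [0, 0, 0, 1, 1]:
    # the first segment (including [CLS] and its [SEP]) is 0s, the second is 1s.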
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
'''simple docstring'''
        files = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase )
        return tuple(files )
| 145 |
from __future__ import annotations
def encode ( plain : str ):
    return [ord(elem ) - 9_6 for elem in plain]
def decode ( encoded : list[int] ):
    return "".join(chr(elem + 9_6 ) for elem in encoded )
def main ( ):
    encoded = encode(input("-> " ).strip().lower() )
    print("Encoded: " , encoded )
    print("Decoded:" , decode(encoded ) )
if __name__ == "__main__":
main()
| 145 | 1 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowercase ( TestCase ):
"""simple docstring"""
    def setUp ( self : Union[str, Any] ):
        '''simple docstring'''
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = '''pt'''
        self.framework_tf = '''tf'''
def __UpperCAmelCase ( self : Optional[int] , lowerCamelCase_ : int ):
'''simple docstring'''
_snake_case : List[str] = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(lowercase__ )
def __UpperCAmelCase ( self : Any , lowerCamelCase_ : List[Any] ):
'''simple docstring'''
_snake_case : Dict = TFAutoModel.from_pretrained(self.test_model , from_pt=lowercase__ )
model_tf.save_pretrained(lowercase__ )
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case : List[str] = '''mock_framework'''
# Framework provided - return whatever the user provides
_snake_case : Tuple = FeaturesManager.determine_framework(self.test_model , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase__ )
_snake_case : Optional[Any] = FeaturesManager.determine_framework(lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase__ )
_snake_case : Tuple = FeaturesManager.determine_framework(lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
def __UpperCAmelCase ( self : List[str] ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(lowercase__ )
_snake_case : Dict = FeaturesManager.determine_framework(lowercase__ )
self.assertEqual(lowercase__ , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(lowercase__ )
_snake_case : Optional[int] = FeaturesManager.determine_framework(lowercase__ )
self.assertEqual(lowercase__ , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(lowercase__ ):
_snake_case : Union[str, Any] = FeaturesManager.determine_framework(lowercase__ )
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = MagicMock(return_value=lowercase__ )
with patch('transformers.onnx.features.is_tf_available' , lowercase__ ):
_snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase__ , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
_snake_case : Optional[Any] = MagicMock(return_value=lowercase__ )
with patch('transformers.onnx.features.is_torch_available' , lowercase__ ):
_snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase__ , self.framework_tf )
# Both in environment -> use PyTorch
_snake_case : Optional[int] = MagicMock(return_value=lowercase__ )
_snake_case : str = MagicMock(return_value=lowercase__ )
with patch('transformers.onnx.features.is_tf_available' , lowercase__ ), patch(
'transformers.onnx.features.is_torch_available' , lowercase__ ):
_snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(lowercase__ , self.framework_pt )
# Both not in environment -> raise error
_snake_case : Dict = MagicMock(return_value=lowercase__ )
_snake_case : Optional[int] = MagicMock(return_value=lowercase__ )
with patch('transformers.onnx.features.is_tf_available' , lowercase__ ), patch(
'transformers.onnx.features.is_torch_available' , lowercase__ ):
with self.assertRaises(lowercase__ ):
_snake_case : Optional[Any] = FeaturesManager.determine_framework(self.test_model )
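    # Resolution order exercised by the tests above, in brief:
    #   1. an explicit framework argument always wins;
    #   2. otherwise a local checkpoint's saved weights decide (PyTorch vs TF);
    #   3. otherwise the installed environment decides, preferring PyTorch;
    #   4. if neither framework is available, an error is raised.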
| 703 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ : str = logging.get_logger(__name__)
class lowercase ( SegformerImageProcessor ):
"""simple docstring"""
def __init__( self : int , *lowerCamelCase_ : str , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
| 652 | 0 |
class lowerCamelCase :
'''simple docstring'''
def __init__( self ):
        self.vertex = {}
    def print_graph ( self ):
print(self.vertex )
for i in self.vertex:
print(lowerCAmelCase , " -> " , " -> ".join([str(lowerCAmelCase ) for j in self.vertex[i]] ) )
    def add_edge ( self , from_vertex , to_vertex ):
# check if vertex is already present,
if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex )
else:
# else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
    def dfs ( self ):
# visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
                self.dfs_recursive(i , visited )
    def dfs_recursive ( self , start_vertex , visited ):
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
                self.dfs_recursive(i , visited )
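# A minimal iterative variant of the same traversal, with an explicit stack
# (the helper name `dfs_iterative` is ours, for illustration only):
def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node not in visited:
            visited.append(node)
            # push neighbors reversed so the first-listed neighbor is popped first
            stack.extend(reversed(graph.get(node, [])))
    return visited
# dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) -> [0, 1, 2, 3]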
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 579 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual ( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def testGradientAccumulator ( self ):
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
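    # The arithmetic asserted above, spelled out (illustrative):
    #   step 1: [ 1.0, 2.0]; step 2: [-2.0, 1.0]; step 3: [-1.0, 2.0]
    #   running sum after 3 steps: [-2.0, 5.0], which is what
    #   accumulator.gradients[0] holds until reset() zeroes it again.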
    def testGradientAccumulatorDistributionStrategy ( self ):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU" )
        if len(physical_devices ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type="CPU" )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer , _ = create_optimizer(5e-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(expected1 , expected2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , expected1 , tol=1e-2 )
            self.assertListAlmostEqual(values[1].value() , expected2 , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 579 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE = DebertaVaTokenizer
SCREAMING_SNAKE_CASE = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase = DebertaVaTokenizer(A ,unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ,A ):
UpperCAmelCase = """this is a test"""
UpperCAmelCase = """this is a test"""
return input_text, output_text
def _UpperCamelCase ( self ):
UpperCAmelCase = """<pad>"""
UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<pad>""" )
self.assertEqual(vocab_keys[1] ,"""<unk>""" )
self.assertEqual(vocab_keys[-1] ,"""[PAD]""" )
        self.assertEqual(len(vocab_keys ) ,30_001 )
def _UpperCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,30_000 )
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = """ \tHeLLo!how \n Are yoU? """
UpperCAmelCase = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _UpperCamelCase ( self ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = """I was born in 92000, and this is falsé."""
UpperCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(A ,split_by_punct=A )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
UpperCAmelCase = DebertaVaTokenizerFast(A ,split_by_punct=A )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = """I was born in 92000, and this is falsé."""
UpperCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = """I was born in 92000, and this is falsé."""
UpperCAmelCase = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = """I was born in 92000, and this is falsé."""
UpperCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = """ \tHeLLo!how \n Are yoU? """
UpperCAmelCase = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
UpperCAmelCase = DebertaVaTokenizer(A ,do_lower_case=A ,split_by_punct=A )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
UpperCAmelCase = DebertaVaTokenizerFast(A ,do_lower_case=A ,split_by_punct=A )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.get_tokenizer()
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = """I was born in 92000, and this is falsé."""
UpperCAmelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(A ,add_special_tokens=A ) )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(A ,add_special_tokens=A ) )
self.assertListEqual(A ,A )
UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A )
UpperCAmelCase = rust_tokenizer.encode(A ,add_special_tokens=A )
self.assertListEqual(A ,A )
UpperCAmelCase = self.get_rust_tokenizer()
UpperCAmelCase = tokenizer.encode(A )
UpperCAmelCase = rust_tokenizer.encode(A )
self.assertListEqual(A ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase = """This is a test"""
UpperCAmelCase = [13, 1, 4_398, 25, 21, 1_289]
UpperCAmelCase = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCAmelCase = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCAmelCase = DebertaVaTokenizer(A ,keep_accents=A )
UpperCAmelCase = DebertaVaTokenizerFast(A ,keep_accents=A )
UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A )
self.assertListEqual(A ,A )
UpperCAmelCase = tokenizer.tokenize(A )
self.assertListEqual(A ,A )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(A ,A )
UpperCAmelCase = rust_tokenizer.encode(A ,add_special_tokens=A )
self.assertListEqual(A ,A )
UpperCAmelCase = rust_tokenizer.tokenize(A )
self.assertListEqual(A ,A )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(A ,A )
# fmt: off
UpperCAmelCase = """I was born in 92000, and this is falsé."""
UpperCAmelCase = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
UpperCAmelCase = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
UpperCAmelCase = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCAmelCase = tokenizer.encode(A ,add_special_tokens=A )
self.assertListEqual(A ,A )
UpperCAmelCase = tokenizer.tokenize(A )
self.assertListEqual(A ,A )
UpperCAmelCase = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(A ,A )
UpperCAmelCase = rust_tokenizer.encode(A ,add_special_tokens=A )
self.assertListEqual(A ,A )
UpperCAmelCase = rust_tokenizer.tokenize(A )
self.assertListEqual(A ,A )
UpperCAmelCase = rust_tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(A ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase = DebertaVaTokenizer(A )
UpperCAmelCase = tokenizer.encode("""sequence builders""" )
UpperCAmelCase = tokenizer.encode("""multi-sequence build""" )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(A ,A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,A ,)
@slow
def _UpperCamelCase ( self ):
# fmt: off
UpperCAmelCase = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A ,model_name="""microsoft/deberta-v2-xlarge""" ,revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" ,)
| 74 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHAaaa :
    def __init__( self ,data ):
        self.data = data
# Initialize hash values
        self.hashes = [
0x6A_09_E6_67,
0xBB_67_AE_85,
0x3C_6E_F3_72,
0xA5_4F_F5_3A,
0x51_0E_52_7F,
0x9B_05_68_8C,
0x1F_83_D9_AB,
0x5B_E0_CD_19,
]
# Initialize round constants
        self.round_constants = [
0x42_8A_2F_98,
0x71_37_44_91,
0xB5_C0_FB_CF,
0xE9_B5_DB_A5,
0x39_56_C2_5B,
0x59_F1_11_F1,
0x92_3F_82_A4,
0xAB_1C_5E_D5,
0xD8_07_AA_98,
0x12_83_5B_01,
0x24_31_85_BE,
0x55_0C_7D_C3,
0x72_BE_5D_74,
0x80_DE_B1_FE,
0x9B_DC_06_A7,
0xC1_9B_F1_74,
0xE4_9B_69_C1,
0xEF_BE_47_86,
0x0F_C1_9D_C6,
0x24_0C_A1_CC,
0x2D_E9_2C_6F,
0x4A_74_84_AA,
0x5C_B0_A9_DC,
0x76_F9_88_DA,
0x98_3E_51_52,
0xA8_31_C6_6D,
0xB0_03_27_C8,
0xBF_59_7F_C7,
0xC6_E0_0B_F3,
0xD5_A7_91_47,
0x06_CA_63_51,
0x14_29_29_67,
0x27_B7_0A_85,
0x2E_1B_21_38,
0x4D_2C_6D_FC,
0x53_38_0D_13,
0x65_0A_73_54,
0x76_6A_0A_BB,
0x81_C2_C9_2E,
0x92_72_2C_85,
0xA2_BF_E8_A1,
0xA8_1A_66_4B,
0xC2_4B_8B_70,
0xC7_6C_51_A3,
0xD1_92_E8_19,
0xD6_99_06_24,
0xF4_0E_35_85,
0x10_6A_A0_70,
0x19_A4_C1_16,
0x1E_37_6C_08,
0x27_48_77_4C,
0x34_B0_BC_B5,
0x39_1C_0C_B3,
0x4E_D8_AA_4A,
0x5B_9C_CA_4F,
0x68_2E_6F_F3,
0x74_8F_82_EE,
0x78_A5_63_6F,
0x84_C8_78_14,
0x8C_C7_02_08,
0x90_BE_FF_FA,
0xA4_50_6C_EB,
0xBE_F9_A3_F7,
0xC6_71_78_F2,
]
        self.preprocessed_data = self.preprocessing(self.data )
self.final_hash()
@staticmethod
    def preprocessing( data ):
        padding = b"""\x80""" + (b"""\x00""" * (63 - (len(data ) + 8) % 64))
        big_endian_integer = struct.pack(""">Q""" ,(len(data ) * 8) )
        return data + padding + big_endian_integer
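    # Note on the padding above: SHA-256 appends a single 0x80 byte, then just
    # enough zero bytes so that the padded length is 56 (mod 64), and finally the
    # original length in bits as a big-endian 64-bit integer, so the preprocessed
    # message is always a whole number of 64-byte blocks.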
    def final_hash( self ):
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0 ,len(self.preprocessed_data ) ,64 )
        ]
        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(""">16L""" ,block ) )
            # add 48 0-ed integers
            words += [0] * 48
            a , b , c , d , e , f , g , h = self.hashes
            for index in range(0 ,64 ):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    sa = (
                        self.ror(words[index - 15] ,7 )
                        ^ self.ror(words[index - 15] ,18 )
                        ^ (words[index - 15] >> 3)
                    )
                    sb = (
                        self.ror(words[index - 2] ,17 )
                        ^ self.ror(words[index - 2] ,19 )
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + sa + words[index - 7] + sb
                    ) % 0x1_00_00_00_00
                # Compression
                sa = self.ror(e ,6 ) ^ self.ror(e ,11 ) ^ self.ror(e ,25 )
                ch = (e & f) ^ ((~e & 0xFF_FF_FF_FF) & g)
                tempa = (
                    h + sa + ch + self.round_constants[index] + words[index]
                ) % 0x1_00_00_00_00
                sb = self.ror(a ,2 ) ^ self.ror(a ,13 ) ^ self.ror(a ,22 )
                maj = (a & b) ^ (a & c) ^ (b & c)
                tempb = (sb + maj) % 0x1_00_00_00_00
                h , g , f , e , d , c , b , a = (
                    g,
                    f,
                    e,
                    ((d + tempa) % 0x1_00_00_00_00),
                    c,
                    b,
                    a,
                    ((tempa + tempb) % 0x1_00_00_00_00),
                )
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x1_00_00_00_00)
                for index, element in enumerate(self.hashes )
            ]
        self.hash = """""".join([hex(value )[2:].zfill(8 ) for value in self.hashes] )
    def ror( self ,value ,rotations ):
        return 0xFF_FF_FF_FF & (value << (32 - rotations)) | (value >> rotations)
class lowerCamelCase__ ( unittest.TestCase ):
def _UpperCamelCase ( self ):
import hashlib
        msg = bytes("""Test String""" ,"""utf-8""" )
        self.assertEqual(SHAaaa(msg ).hash ,hashlib.sha256(msg ).hexdigest() )
def main( ):
    """simple docstring"""
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
    parser.add_argument(
        """-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file , """rb""" ) as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string , """utf-8""" )
    print(SHAaaa(hash_input ).hash )
if __name__ == "__main__":
    main()
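# Example usage (a minimal sketch; assumes this module is saved as sha256.py):
#   $ python sha256.py --string "Hello World!! Welcome to Cryptography"
#   $ python sha256.py --file some_file.bin
# In-process cross-check against the standard library (the two should always agree):
#   import hashlib
#   assert SHAaaa(b"abc").hash == hashlib.sha256(b"abc").hexdigest()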
| 74 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( TestCase ):
    def _create_example_records( self ):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dataset( self ):
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data )
    def lowercase__ ( self : Dict ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
    def lowercase__ ( self : Optional[Any] ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )
    def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
        example_records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(example_records )
        self.assertDictEqual(dset[0] , {'''col_1''': 1} )
        self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
    def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
        example_records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(example_records )
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
    def lowercase__ ( self : Optional[Any] ):
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
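# A minimal sketch of the behaviour exercised above (the records are illustrative):
#   from datasets import Dataset
#   dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
#   dset[1]  # -> {"col_1": None}; the first record fixes the column schema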
| 35 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
    mod_file : List[Any] = inspect.getfile(accelerate.test_utils )
    test_file_path : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    base_cmd : Any = ['''accelerate''', '''launch''']
    config_folder : Dict = Path.home() / '''.cache/huggingface/accelerate'''
    config_file : Optional[int] = '''default_config.yaml'''
    config_path : Optional[Any] = config_folder / config_file
    changed_path : Optional[Any] = config_folder / '''_default_config.yaml'''
    test_config_path : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
    def setUpClass( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
    def tearDownClass( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
    def lowercase__ ( self : Tuple ):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
    def lowercase__ ( self : Tuple ):
        for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ['''--config_file''', str(config ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
    tpu_name : str = '''test-tpu'''
    tpu_zone : Tuple = '''us-central1-a'''
    command : Optional[int] = '''ls'''
    cmd : Dict = ['''accelerate''', '''tpu-config''']
    base_output : Tuple = '''cd /usr/share'''
    command_file : List[Any] = '''tests/test_samples/test_command_file.sh'''
    gcloud : Any = '''Running gcloud compute tpus tpu-vm ssh'''
    def lowercase__ ( self : Tuple ):
        output = run_command(
            self.cmd
            + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , output , )
    def lowercase__ ( self : Optional[Any] ):
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command''',
                self.command,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , output , )
    def lowercase__ ( self : str ):
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=True )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )
    def lowercase__ ( self : List[Any] ):
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , output , )
    def lowercase__ ( self : int ):
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--command''',
                self.command,
                '''--command''',
                '''echo "Hello World"''',
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , output , )
    def lowercase__ ( self : Optional[Any] ):
        output = run_command(
            self.cmd
            + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )
    def lowercase__ ( self : Tuple ):
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command_file''',
                self.command_file,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )
    def lowercase__ ( self : Any ):
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )
    def lowercase__ ( self : int ):
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--install_accelerate''',
                '''--accelerate_version''',
                '''12.0.0''',
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , output , )
| 35 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class UpperCAmelCase ( unittest.TestCase , ToolTesterMixin ):
    '''simple docstring'''
    def setUp( self ):
        """simple docstring"""
        self.tool = load_tool("text-classification" )
        self.tool.setup()
        self.remote_tool = load_tool("text-classification" , remote=True )
    def snake_case__ ( self : Tuple ):
        """simple docstring"""
        result = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def snake_case__ ( self : Union[str, Any] ):
        """simple docstring"""
        result = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def snake_case__ ( self : Union[str, Any] ):
        """simple docstring"""
        result = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
    def snake_case__ ( self : Tuple ):
        """simple docstring"""
        result = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(result , "positive" )
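# Outside the test harness the tool is used the same way (a sketch; the label
# set here is an arbitrary example):
#   from transformers import load_tool
#   classifier = load_tool("text-classification")
#   classifier.setup()
#   classifier("That's quite cool", ["positive", "negative"])  # -> "positive"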
| 139 |
def solution( _A = 600851475143 ):
    '''simple docstring'''
    try:
        n = int(_A )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
    print(f'''{solution() = }''')
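    # Quick sanity checks (known values: 13195 = 5 * 7 * 13 * 29, so its largest
    # prime factor is 29; a prime is its own largest prime factor):
    assert solution(13195 ) == 29
    assert solution(17 ) == 17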
| 139 | 1 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param( torch_layer , weight , bias=None ) ->List[str]:
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh( weights , torch_layer , hidden_size ) ->List[Any]:
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local( weights , torch_layer , hidden_size ) ->List[Any]:
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch( weights , torch_block , hidden_size ) ->Dict:
    # layernorm 1
    layer_norm_a = weights[0][0][0]
    layer_norm_a_weight = np.asarray(layer_norm_a[0] )
    layer_norm_a_bias = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_a_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_a_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_a_weight ) , torch.tensor(layer_norm_a_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch( weights , torch_model , hidden_size ) ->Union[str, Any]:
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'''{position_embeddings[emb_idx]} emb does not match'''
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch( trax_model_pkl_path , config_file , pytorch_dump_path ) ->List[Any]:
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file )
    print(f'''Building PyTorch model from configuration: {config}''' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , 'rb' ) as f:
        model_weights = pickle.load(f )['weights']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
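    # Example invocation (a sketch; the file names are hypothetical):
    #   python convert_reformer_trax_checkpoint_to_pytorch.py \
    #       --trax_model_pkl_path ./model.pkl \
    #       --config_file ./config.json \
    #       --pytorch_dump_path ./pytorch_model.bin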
| 368 |
| 368 | 1 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
__UpperCAmelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class UpperCAmelCase_ ( ChunkPipeline ):
'''simple docstring'''
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        requires_backends(self , '''vision''' )
        requires_backends(self , '''torch''' )
        if self.framework != "pt":
            raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING )
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs['''points_per_batch'''] = kwargs['''points_per_batch''']
        if "points_per_crop" in kwargs:
            preprocess_kwargs['''points_per_crop'''] = kwargs['''points_per_crop''']
        if "crops_n_layers" in kwargs:
            preprocess_kwargs['''crops_n_layers'''] = kwargs['''crops_n_layers''']
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs['''crop_overlap_ratio'''] = kwargs['''crop_overlap_ratio''']
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs['''crop_n_points_downscale_factor'''] = kwargs['''crop_n_points_downscale_factor''']
        # postprocess args
        if "pred_iou_thresh" in kwargs:
            forward_params['''pred_iou_thresh'''] = kwargs['''pred_iou_thresh''']
        if "stability_score_offset" in kwargs:
            forward_params['''stability_score_offset'''] = kwargs['''stability_score_offset''']
        if "mask_threshold" in kwargs:
            forward_params['''mask_threshold'''] = kwargs['''mask_threshold''']
        if "stability_score_thresh" in kwargs:
            forward_params['''stability_score_thresh'''] = kwargs['''stability_score_thresh''']
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs['''crops_nms_thresh'''] = kwargs['''crops_nms_thresh''']
        if "output_rle_mask" in kwargs:
            postprocess_kwargs['''output_rle_mask'''] = kwargs['''output_rle_mask''']
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs['''output_bboxes_mask'''] = kwargs['''output_bboxes_mask''']
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__( self , image , *args , num_workers=None , batch_size=None , **kwargs ):
        """simple docstring"""
        return super().__call__(image , *args , num_workers=num_workers , batch_size=batch_size , **kwargs )
    def preprocess( self , image , points_per_batch=64 , crops_n_layers = 0 , crop_overlap_ratio = 512 / 1_500 , points_per_crop = 32 , crop_n_points_downscale_factor = 1 , ):
        """simple docstring"""
        image = load_image(image )
        target_size = self.image_processor.size['''longest_edge''']
        crop_boxes , grid_points , cropped_images , input_labels = self.image_processor.generate_crop_boxes(
            image , target_size , crops_n_layers , crop_overlap_ratio , points_per_crop , crop_n_points_downscale_factor )
        model_inputs = self.image_processor(images=cropped_images , return_tensors='''pt''' )
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs , device=self.device )
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop('''pixel_values''' ) )
                    model_inputs['''image_embeddings'''] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. '''
                '''To return all points at once, set points_per_batch to None''' )
        for i in range(0 , n_points , points_per_batch ):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward( self , model_inputs , pred_iou_thresh=0.88 , stability_score_thresh=0.95 , mask_threshold=0 , stability_score_offset=1 , ):
        """simple docstring"""
        input_boxes = model_inputs.pop('''input_boxes''' )
        is_last = model_inputs.pop('''is_last''' )
        original_sizes = model_inputs.pop('''original_sizes''' ).tolist()
        reshaped_input_sizes = model_inputs.pop('''reshaped_input_sizes''' ).tolist()
        model_outputs = self.model(**model_inputs )
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs['''pred_masks''']
        masks = self.image_processor.post_process_masks(
            low_resolution_masks , original_sizes , reshaped_input_sizes , mask_threshold , binarize=False )
        iou_scores = model_outputs['''iou_scores''']
        masks , iou_scores , boxes = self.image_processor.filter_masks(
            masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , pred_iou_thresh , stability_score_thresh , mask_threshold , stability_score_offset , )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess( self , model_outputs , output_rle_mask=False , output_bboxes_mask=False , crops_nms_thresh=0.7 , ):
        """simple docstring"""
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop('''iou_scores''' ) )
            all_masks.extend(model_output.pop('''masks''' ) )
            all_boxes.append(model_output.pop('''boxes''' ) )
        all_scores = torch.cat(all_scores )
        all_boxes = torch.cat(all_boxes )
        output_masks , iou_scores , rle_mask , bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks , all_scores , all_boxes , crops_nms_thresh )
        extra = defaultdict(list )
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v )
        optional = {}
        if output_rle_mask:
            optional['''rle_mask'''] = rle_mask
        if output_bboxes_mask:
            optional['''bounding_boxes'''] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 712 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( TokenizerTesterMixin, unittest.TestCase):
'''simple docstring'''
__UpperCamelCase : str = DebertaTokenizer
__UpperCamelCase : Optional[int] = True
__UpperCamelCase : Optional[int] = DebertaTokenizerFast
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''[UNK]''',
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _lowercase ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
        input_text = '''lower newer'''
        output_text = '''lower newer'''
return input_text, output_text
def _lowercase ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        text = '''lower newer'''
        bpe_tokens = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _lowercase ( self ):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('''Hello''' , '''World''' )
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['''token_type_ids'''] , expected_token_type_ids )
@slow
def _lowercase ( self ):
"""simple docstring"""
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_text_from_decode = tokenizer.encode(
            '''sequence builders''' , add_special_tokens=True , add_prefix_space=False )
        encoded_pair_from_decode = tokenizer.encode(
            '''sequence builders''' , '''multi-sequence build''' , add_special_tokens=True , add_prefix_space=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _lowercase ( self ):
"""simple docstring"""
        tokenizer_classes = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
            sequences = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            encoding = tokenizer(sequences , padding=True )
            decoded_sequences = [tokenizer.decode(seq , skip_special_tokens=True ) for seq in encoding['''input_ids''']]
# fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data , expected_encoding )
            for expected, decoded in zip(expected_decoded_sequence , decoded_sequences ):
                self.assertEqual(expected , decoded )
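# With the toy vocab/merges written in setUp, byte-level BPE splits "lower newer"
# exactly as asserted in the full-tokenizer test above:
# ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'],
# where '\u0120' is the GPT-2 style marker for a leading space.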
| 643 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCamelCase( PipelineTool ):
snake_case_ : Tuple = """openai/whisper-base"""
snake_case_ : Optional[Any] = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
snake_case_ : Dict = """transcriber"""
snake_case_ : List[Any] = WhisperProcessor
snake_case_ : int = WhisperForConditionalGeneration
snake_case_ : int = ["""audio"""]
snake_case_ : Union[str, Any] = ["""text"""]
    def encode( self : Dict , SCREAMING_SNAKE_CASE : Any ) -> Optional[int]:
        '''simple docstring'''
        return self.pre_processor(SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_features
    def forward( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple ) -> List[str]:
        '''simple docstring'''
        return self.model.generate(inputs=SCREAMING_SNAKE_CASE )
    def decode( self : Any , SCREAMING_SNAKE_CASE : Tuple ) -> Optional[Any]:
        '''simple docstring'''
        return self.pre_processor.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=True )[0]
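# Usage sketch (the audio path is a hypothetical example; the openai/whisper-base
# checkpoint named above is fetched on first use):
#   tool = UpperCamelCase()
#   transcript = tool("sample.wav")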
| 371 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__( self , text : str , pattern : str ) -> None:
        '''simple docstring'''
        self.text , self.pattern = text, pattern
        self.textLen , self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char : str ) -> int:
        '''simple docstring'''
        for i in range(self.patLen - 1 , -1 , -1 ):
            if char == self.pattern[i]:
                return i
        return -1
    def mismatch_in_text( self , current_pos : int ) -> int:
        '''simple docstring'''
        for i in range(self.patLen - 1 , -1 , -1 ):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1
    def bad_character_heuristic( self ) -> list[int]:
        '''simple docstring'''
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                ) # shifting index lgtm [py/multiple-definition]
        return positions
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
    print('No match found')
else:
    print('Pattern found in following positions: ')
    print(positions)
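# For the inputs above, "AB" matches "ABAABA" at indices 0 and 3, so this script
# prints: Pattern found in following positions: [0, 3]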
| 371 | 1 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple ( x : int ) ->Optional[Any]:
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
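# e.g. to_atuple(224) -> (224, 224), while an iterable such as (224, 196) is
# returned unchanged; it is used below to normalise image and patch sizes.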
@require_flax
class __SCREAMING_SNAKE_CASE :
def _UpperCamelCase ( self : Optional[Any] , snake_case : Optional[Any] , snake_case : Dict ):
'''simple docstring'''
pass
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
pass
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
pass
def _UpperCamelCase ( self : List[str] , snake_case : Tuple , snake_case : int , snake_case : Optional[int] ):
'''simple docstring'''
A__ : Union[str, Any] = np.abs((a - b) ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , F'Difference between torch and flax is {diff} (>= {tol}).' )
def _UpperCamelCase ( self : List[str] , snake_case : Dict , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : List[str]=None , **snake_case : Dict ):
'''simple docstring'''
A__ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A__ : int = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A__ : str = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Union[str, Any] , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Dict , snake_case : Union[str, Any]=None , **snake_case : Tuple ):
'''simple docstring'''
A__ , A__ : Union[str, Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A__ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
A__ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
A__ : Optional[Any] = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _UpperCamelCase ( self : Tuple , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Optional[Any]=None , **snake_case : Dict ):
'''simple docstring'''
A__ , A__ : Any = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A__ : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
A__ : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
A__ : Dict = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
A__ : int = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE )
A__ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A__ : Tuple = model(input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
A__ : int = after_output[0]
A__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-3 )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : Tuple , snake_case : Dict , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Optional[int]=None , **snake_case : int ):
'''simple docstring'''
A__ , A__ : Optional[Any] = self.get_vision_text_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A__ : str = {"""vision_model""": vision_model, """text_model""": text_model}
A__ : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_SCREAMING_SNAKE_CASE )
A__ : Tuple = model(
input_ids=_SCREAMING_SNAKE_CASE , pixel_values=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
A__ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ : Any = to_atuple(vision_model.config.image_size )
A__ : int = to_atuple(vision_model.config.patch_size )
A__ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A__ : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
A__ : List[Any] = output.text_model_output.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _UpperCamelCase ( self : Union[str, Any] , snake_case : str , snake_case : List[str] , snake_case : str ):
'''simple docstring'''
pt_model.to(_SCREAMING_SNAKE_CASE )
pt_model.eval()
# prepare inputs
A__ : Union[str, Any] = inputs_dict
A__ : Any = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
A__ : Optional[int] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
A__ : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A__ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
A__ : Dict = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
A__ : str = VisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
pt_model_loaded.to(_SCREAMING_SNAKE_CASE )
pt_model_loaded.eval()
with torch.no_grad():
A__ : List[Any] = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_SCREAMING_SNAKE_CASE , pt_output_loaded.numpy() , 4e-2 )
def _UpperCamelCase ( self : str , snake_case : List[str] , snake_case : Any , snake_case : Union[str, Any] ):
'''simple docstring'''
A__ : Optional[Any] = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A__ : List[str] = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A__ : Union[str, Any] = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A__ : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
A__ : int = fx_state
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int ):
'''simple docstring'''
A__ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
A__ : Optional[Any] = VisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A__ : Dict = FlaxVisionTextDualEncoderModel(_SCREAMING_SNAKE_CASE )
A__ : str = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
self.check_pt_flax_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Tuple = self.prepare_config_and_inputs()
self.check_save_load(**_SCREAMING_SNAKE_CASE )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Tuple = self.prepare_config_and_inputs()
A__ : int = config_inputs_dict.pop("""vision_config""" )
A__ : str = config_inputs_dict.pop("""text_config""" )
A__ : Optional[int] = config_inputs_dict
self.check_equivalence_pt_to_flax(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.check_equivalence_flax_to_pt(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ , A__ : Tuple = self.get_pretrained_model_and_inputs()
A__ : Union[str, Any] = model_a(**_SCREAMING_SNAKE_CASE )
A__ : str = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_SCREAMING_SNAKE_CASE )
A__ : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(_SCREAMING_SNAKE_CASE )
A__ : str = model_a(**_SCREAMING_SNAKE_CASE )
A__ : Union[str, Any] = after_outputs[0]
A__ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1e-5 )
@require_flax
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , unittest.TestCase ):
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=_SCREAMING_SNAKE_CASE , text_from_pt=_SCREAMING_SNAKE_CASE , )
A__ : Optional[int] = 13
A__ : Tuple = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
A__ : str = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
A__ : Optional[int] = random_attention_mask([batch_size, 4] )
A__ : Dict = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _UpperCamelCase ( self : Union[str, Any] , snake_case : List[str] , snake_case : Optional[int] ):
'''simple docstring'''
A__ : List[Any] = FlaxViTModel(_SCREAMING_SNAKE_CASE )
A__ : Any = FlaxBertModel(_SCREAMING_SNAKE_CASE )
return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }

@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }

@require_flax
@require_vision
class VisionTextDualEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0])
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
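
    # A minimal usage sketch (commented out; not executed by the test suite). It assumes
    # the same `clip-italian/clip-italian` checkpoint and turns the image logits into
    # caption probabilities with a plain numpy softmax:
    #
    #   logits = outputs.logits_per_image  # shape (num_images, num_texts)
    #   probs = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)
    #   # probs[0] gives the relative match of each caption to the image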
| 709 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        # the first page (100 jobs) was fetched above; iterate over the remaining pages
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
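

# Illustrative usage of `get_job_links` (the run id is hypothetical):
#   job_links = get_job_links("1234567890", token=os.environ.get("GITHUB_TOKEN"))
#   # -> {"Model tests (models/albert, single-gpu)": "https://github.com/huggingface/transformers/actions/runs/...", ...}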
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact as `<output_dir>/<artifact_name>.zip`."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # the artifact URL answers with a redirect; read the pre-signed download URL
    # from the `Location` header instead of following the redirect with auth headers
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # a list with elements of the form [line of error, error, failed test, job link]
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result
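

# Each record returned above has the form [error_line, error, failed_test, job_link],
# e.g. (values illustrative):
#   ["tests/test_x.py:42", "AssertionError: ...",
#    "tests/test_x.py::XTest::test_y", "https://github.com/huggingface/transformers/actions/runs/..."]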
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
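

# The reduced mapping has the shape (values illustrative):
#   {"AssertionError: ...": {"count": 3,
#                            "failed_tests": [("tests/test_x.py::XTest::test_y", "tests/test_x.py:42"), ...]}}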
def get_model(test):
    """Get the model name from a test method path like `tests/models/<model>/...::...`."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        # the most common error for this model, with its own count
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
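

# Illustrative output of `make_github_table_per_model` (values made up):
#   | model | no. of errors | major error | count |
#   |-:|-:|-:|-:|
#   | bert | 3 | AssertionError: ... | 2 |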
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
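
    # Example invocation (the run id is hypothetical, and the file name assumes this
    # script is saved as `get_ci_error_statistics.py`):
    #   python get_ci_error_statistics.py --workflow_run_id 1234567890 \
    #       --output_dir ci_errors --token $GITHUB_TOKEN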
| 498 | 0 |