| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 – 53.2k chars) | int64 (0 – 721) | string (91 – 41.9k chars) | int64 (0 – 699) | int64 (0 – 1) |
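Each row below pairs a `code` sample with a `style_context` sample. Judging from the rows shown, `label` appears to be 1 exactly when `code_codestyle` equals `style_context_codestyle`, i.e. when the two samples share a codestyle. The delimiters of the form `| N |` and `| N | M |` between samples carry these per-row column values.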
"""simple docstring"""
from itertools import permutations
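# Project Euler problem 43: sum all 0-9 pandigital numbers whose overlapping 3-digit
# substrings satisfy the 2, 3, 5, 7, 11, 13, 17 divisibility chain.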
def is_substring_divisible(num: tuple) -> bool:
    '''simple docstring'''
    # d2d3d4 divisible by 2 means the digit at index 3 is even
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))
if __name__ == "__main__":
print(f"""{solution() = }""")
| code_codestyle: 535 |
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    '''simple docstring'''
    # root-mean-square speed: v_rms = sqrt(3 * R * T / M)
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''')
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''')
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
| style_context_codestyle: 535 | label: 1 |
'''simple docstring'''
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
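# Dataset reader that builds a map-style `Dataset` (or a streaming `IterableDataset`)
# from a user-supplied Python generator function.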
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='''train''')
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split='''train''', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
| code_codestyle: 717 |

'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
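# Pipeline that classifies a video by sampling frames with decord and running them
# through a video classification model.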
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, '''decord''')
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['''frame_sampling_rate'''] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['''num_frames'''] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('''http://''') or video.startswith('''https://'''):
            video = BytesIO(requests.get(video).content)
        videoreader = VideoReader(video)
        videoreader.seek(0)
        # sample `num_frames` evenly spaced frame indices
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)
        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)
        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(F'''Unsupported framework: {self.framework}''')
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| style_context_codestyle: 606 | label: 0 |
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
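# Unit tests for BioGPT: a tester that builds small random configs/inputs, the common
# model test suite, and slow integration tests against microsoft/biogpt.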
class BioGptModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """simple docstring"""
        return BioGptConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=True, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ):
        """simple docstring"""
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past = model(input_ids, attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, )
        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past, attention_mask=attn_mask)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))

    def create_and_check_biogpt_model_past_large_inputs(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """simple docstring"""
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1E-3))

    def create_and_check_forward_and_backwards(self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False):
        """simple docstring"""
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        """simple docstring"""
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': BioGptModel,
            'text-classification': BioGptForSequenceClassification,
            'text-generation': BioGptForCausalLM,
            'token-classification': BioGptForTokenClassification,
            'zero-shot': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_biogpt_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
    @slow
    def test_batch_generation(self):
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        tokenizer.padding_side = "left"
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)
        outputs = model.generate(
            input_ids=input_ids, attention_mask=inputs["attention_mask"].to(torch_device), )
        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        """simple docstring"""
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        """simple docstring"""
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]
        vocab_size = 42384
        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_biogpt_generation(self):
        """simple docstring"""
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized, min_length=100, max_length=1024, num_beams=5, early_stopping=True, )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, expected_output_str)
| code_codestyle: 216 |

import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
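# Helpers to verify downloaded-file checksums and split sizes against expected dataset metadata.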
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = 'all_checks'
    BASIC_CHECKS = 'basic_checks'
    NO_CHECKS = 'no_checks'


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    """simple docstring"""
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f'Checksums didn\'t match{for_verification_name}:\n'
            f'{bad_urls}\n'
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error")
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    """simple docstring"""
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True):
    """simple docstring"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            # hash the file in 1 MiB chunks
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: int):
    """simple docstring"""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| style_context_codestyle: 216 | label: 1 |
'''simple docstring'''
from __future__ import annotations
import math
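# Sieve of Eratosthenes: return all primes up to and including `num`.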
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = F'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
| code_codestyle: 537 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
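# Shared generation tests for Flax causal LMs: greedy/sampling/beam search, logits
# warpers, attention masks, and jit-compiled parity checks.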
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["""XLA_PYTHON_CLIENT_MEM_FRACTION"""] = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    if rng is None:
        rng = random.Random()
    total_dims = 1
    for dim in shape:
        total_dims *= dim
    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))
    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['''input_ids'''].shape[-1] // 2
        input_ids = inputs['''input_ids'''][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
    @is_pt_flax_cross_test
    def test_greedy_generate_pt_flax(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0
        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)
            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)
            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))
            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)
    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''')
        model = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''')
        encoded_input = '''Hello world'''
        input_ids = tokenizer(encoded_input, return_tensors='''np''').input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, '''do_samples'''):
            model.generate(input_ids, do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, '''foo'''):
            fake_model_kwargs = {'''foo''': '''bar'''}
            model.generate(input_ids, **fake_model_kwargs)
| style_context_codestyle: 537 | label: 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
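# Lazy import structure: framework-specific Blenderbot modules are only imported on first access.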
_import_structure = {
"""configuration_blenderbot""": [
"""BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlenderbotConfig""",
"""BlenderbotOnnxConfig""",
],
"""tokenization_blenderbot""": ["""BlenderbotTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_blenderbot_fast"""] = ["""BlenderbotTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_blenderbot"""] = [
"""BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlenderbotForCausalLM""",
"""BlenderbotForConditionalGeneration""",
"""BlenderbotModel""",
"""BlenderbotPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_blenderbot"""] = [
"""TFBlenderbotForConditionalGeneration""",
"""TFBlenderbotModel""",
"""TFBlenderbotPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_blenderbot"""] = [
"""FlaxBlenderbotForConditionalGeneration""",
"""FlaxBlenderbotModel""",
"""FlaxBlenderbotPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)

| code_codestyle: 142 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
    from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline

| style_context_codestyle: 142 | label: 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
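# Tokenizer tests for CLIP: BPE vocab round-trips, slow/fast (ftfy vs Rust) parity, and offset mappings.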
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f'''{text_of_1_token} {text_of_1_token}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = f''' {text}'''
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| code_codestyle: 468 |
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument('''--user''', type=str, default='''ubuntu''')
    parser.add_argument('''--host''', type=str, default='''localhost''')
    parser.add_argument('''--key_path''', type=str, default=None)
    parser.add_argument('''--instance''', type=str, default='''V100:1''')
    parser.add_argument('''--provider''', type=str, default='''cheapest''')
    parser.add_argument('''--use_spot''', type=bool, default=False)
    parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
            name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| style_context_codestyle: 468 | label: 1 |
'''simple docstring'''
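# Odd-even transposition sort (brick sort): alternate passes over even- and odd-indexed
# pairs until a full sweep makes no swaps.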
def odd_even_sort(input_list: list) -> list:
    '''simple docstring'''
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
snake_case : Dict = [int(x) for x in input().split()]
# inputing elements of the list in one line
snake_case : Dict = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| code_codestyle: 566 |
'''simple docstring'''
from string import ascii_uppercase
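# Convert a positive decimal integer to any base from 2 to 36, using A-Z for digit values above 9.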
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    '''simple docstring'''
    if isinstance(num, float):
        raise TypeError("""int() can't convert non-string with explicit base""")
    if num < 0:
        raise ValueError("""parameter must be positive int""")
    if isinstance(base, str):
        raise TypeError("""'str' object cannot be interpreted as an integer""")
    if isinstance(base, float):
        raise TypeError("""'float' object cannot be interpreted as an integer""")
    if base in (0, 1):
        raise ValueError("""base must be >= 2""")
    if base > 36:
        raise ValueError("""base must be <= 36""")
    new_value = """"""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| style_context_codestyle: 566 | label: 1 |
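# Count connected "islands" of 1s in a grid using 8-directional depth-first search.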
class Graph:  # Public class to implement a graph
    """simple docstring"""

    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        """simple docstring"""
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]):
        """simple docstring"""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]):
        """simple docstring"""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self):  # And finally, count all islands.
        """simple docstring"""
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
| code_codestyle: 704 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
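# Open the top Google result for a query (from argv or stdin) in the default browser.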
if __name__ == "__main__":
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    url = f'https://www.google.com/search?q={query}&num=100'
    res = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
    webbrowser.open(link)
| style_context_codestyle: 52 | label: 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
    '''tokenizer_file''': {
        '''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''mobilebert-uncased''': 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]",
                 sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]",
                 tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=None ):
'''simple docstring'''
UpperCamelCase__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = [self.sep_token_id]
UpperCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
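
    # Worked example of the layout produced above: a pair (A, B) is encoded as
    # [CLS] A [SEP] B [SEP], so two 3-token sequences get token type ids
    # [0] * 5 + [1] * 4 (1 + 3 + 1 tokens for the first segment, 3 + 1 for the second).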
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files) | 228 |
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    # LayoutLM-style models expect box coordinates on a fixed 0-1000 grid.
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
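
# Worked example for normalize_box: a word box (10, 20, 110, 60) on a 1000x500-pixel
# page maps to [10, 40, 110, 120] on the model's fixed 0-1000 coordinate grid.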
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data | 228 | 1
"""Fast tokenization class for GPT-NeoX."""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ) -> None:
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # Rebuild the pre-tokenizer if the stored add_prefix_space flag differs.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
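
# Usage sketch (downloads tokenizer files from the Hub on first use):
#   tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#   tokenizer("hello world")["input_ids"]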
| 712 |
"""Fine-tune a GPT-2 language model with Information Gain Filtration (IGF)."""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """Collect (context, information gain) pairs used to train the secondary learner."""
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    """Train the secondary learner (IGF model) on the collected context/IG pairs."""
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    """Fine-tune the model, optionally filtering each context through the secondary learner."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    current_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(current_perp)
                    print("Test perplexity, step", global_step, ":", current_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
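
# Example invocation (script name and data paths are illustrative; the .jbl data
# files must be prepared beforehand):
#   python run_clm_igf.py --data_dir ./data --model_name_or_path gpt2 --output_dir ./out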
| 355 | 0 |
def matching_min_vertex_cover(graph: dict) -> set:
    """APX algorithm for minimum vertex cover based on a maximal matching:
    repeatedly pick an uncovered edge and add both of its endpoints to the cover."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return all directed edges of the graph as a set of (from_node, to_node) tuples."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 462 |
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as MobileNetV2 does not use
    input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
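
# To run only this module (path follows the transformers repo layout; adjust as needed):
#   python -m pytest tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py -q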
| 62 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    # Round the latent grid up so the decoded image covers the requested size.
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22Pipeline(DiffusionPipeline):
    """Decoder-stage pipeline for image generation with Kandinsky 2.2."""

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via accelerate, significantly reducing memory usage."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload models to CPU, keeping only one submodule on GPU at a time."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 708 |
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None

    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.vertices_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # You should override it in a subclass
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.vertices_count for _ in range(self.vertices_count)]

        self.heights = [0] * self.vertices_count
        self.excesses = [0] * self.vertices_count

    def _algorithm(self):
        self.heights[self.source_index] = self.vertices_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.vertices_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.vertices_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.vertices_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = [0]
SCREAMING_SNAKE_CASE__ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
SCREAMING_SNAKE_CASE__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
SCREAMING_SNAKE_CASE__ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
SCREAMING_SNAKE_CASE__ = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
| 577 | 0 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # Student (DistilBERT) parameter names on the left, teacher (BERT) names on the right.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
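
    # Example invocation (script and output paths are illustrative):
    #   python extract_distilbert.py --model_type bert --model_name bert-base-uncased \
    #       --dump_checkpoint serialization_dir/distilbert_init.pth --vocab_transform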
| 233 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest_case = "[...]"  # unused placeholder retained from the snippet
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest_case = "[...]"  # unused placeholder retained from the snippet
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def _a ( __UpperCamelCase : int ):
    import tensorflow.compat.v1 as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('''sv''' ,'''''' ,'''''' ) # handle pytest cli flags
class lowercase ( __UpperCamelCase ):
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
assert len(input_dict['''input_ids'''] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
lowerCAmelCase__ : List[str] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def _a ( __UpperCamelCase : str ):
import torch
def bert_cos_score_idf(__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : List[str] ,*__UpperCamelCase : str ,**__UpperCamelCase : List[Any] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(__UpperCamelCase ) )
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('''bert_score.scorer.get_model''' ), patch(
'''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
lowerCAmelCase__ : List[str] = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def _a ( __UpperCamelCase : Tuple ):
def load_from_checkpoint(__UpperCamelCase : Any ):
class lowercase :
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
assert len(SCREAMING_SNAKE_CASE__ ) == 2
lowerCAmelCase__ : Dict = [0.19, 0.92]
return scores, sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )
return Model()
    # mock load_from_checkpoint which is supposed to download a bert model
with patch('''comet.download_model''' ) as mock_download_model:
lowerCAmelCase__ : str = None
with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
lowerCAmelCase__ : Optional[Any] = load_from_checkpoint
yield
def _a ( ):
lowerCAmelCase__ : int = load_metric(os.path.join('''metrics''' ,'''seqeval''' ) )
lowerCAmelCase__ : Dict = '''ERROR'''
lowerCAmelCase__ : Optional[int] = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(__UpperCamelCase ,match=re.escape(__UpperCamelCase ) ):
metric.compute(predictions=[] ,references=[] ,scheme=__UpperCamelCase )
| 233 | 1 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=13 , _snake_case=30 , _snake_case=2 , _snake_case=3 , _snake_case=True , _snake_case=True , _snake_case=32 , _snake_case=2 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=10 , _snake_case=0.0_2 , _snake_case=3 , _snake_case=0.6 , _snake_case=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = mask_ratio
__lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
__lowerCamelCase = (image_size // patch_size) ** 2
__lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
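        # Worked example with the defaults above (illustrative, not from the original
        # test): image_size=30, patch_size=2 -> num_patches = (30 // 2) ** 2 = 225,
        # and with mask_ratio=0.6: seq_length = ceil(0.4 * (225 + 1)) = 91.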
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
__lowerCamelCase = TFViTMAEModel(config=_snake_case )
__lowerCamelCase = model(_snake_case , training=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
__lowerCamelCase = TFViTMAEForPreTraining(_snake_case )
__lowerCamelCase = model(_snake_case , training=_snake_case )
# expected sequence length = num_patches
__lowerCamelCase = (self.image_size // self.patch_size) ** 2
__lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
__lowerCamelCase = 1
__lowerCamelCase = TFViTMAEForPreTraining(_snake_case )
__lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase = model(_snake_case , training=_snake_case )
__lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) = config_and_inputs
__lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
SCREAMING_SNAKE_CASE_ = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TFViTMAEModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , tf.keras.layers.Layer ) )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(_snake_case )
__lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(_snake_case )
__lowerCamelCase = self._prepare_for_class(_snake_case , _snake_case )
__lowerCamelCase = model(_snake_case , noise=_snake_case )
__lowerCamelCase = copy.deepcopy(self._prepare_for_class(_snake_case , _snake_case ) )
__lowerCamelCase = model(**_snake_case , noise=_snake_case )
__lowerCamelCase = outputs_dict[0].numpy()
__lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def _lowerCamelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_snake_case ):
__lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_snake_case ):
__lowerCamelCase = v.numpy()
else:
__lowerCamelCase = np.array(_snake_case )
return inputs_np_dict
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(_snake_case )
__lowerCamelCase = self._prepare_for_class(_snake_case , _snake_case )
__lowerCamelCase = prepare_numpy_arrays(_snake_case )
__lowerCamelCase = model(_snake_case , noise=_snake_case )
__lowerCamelCase = model(**_snake_case , noise=_snake_case )
self.assert_outputs_same(_snake_case , _snake_case )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
np.random.seed(2 )
__lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowerCamelCase = tf.constant(_snake_case )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__lowerCamelCase = tf_noise
super().check_pt_tf_models(_snake_case , _snake_case , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_snake_case )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(_snake_case , _snake_case ),)
if isinstance(_snake_case , _snake_case )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_snake_case , '''_keras_serializable''' , _snake_case )
}
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
__lowerCamelCase = tf.convert_to_tensor(_snake_case )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
__lowerCamelCase = main_layer_class(_snake_case )
__lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
__lowerCamelCase = tf.keras.Model(_snake_case , outputs=main_layer(_snake_case ) )
__lowerCamelCase = model(_snake_case )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(_snake_case , '''keras_model.h5''' )
model.save(_snake_case )
__lowerCamelCase = tf.keras.models.load_model(
_snake_case , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_snake_case , tf.keras.Model )
__lowerCamelCase = model(_snake_case )
self.assert_outputs_same(_snake_case , _snake_case )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(_snake_case )
__lowerCamelCase = self._prepare_for_class(_snake_case , _snake_case )
__lowerCamelCase = model(_snake_case , noise=_snake_case )
if model_class.__name__ == "TFViTMAEModel":
__lowerCamelCase = outputs.last_hidden_state.numpy()
__lowerCamelCase = 0
else:
__lowerCamelCase = outputs.logits.numpy()
__lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_snake_case , saved_model=_snake_case )
__lowerCamelCase = model_class.from_pretrained(_snake_case )
__lowerCamelCase = model(_snake_case , noise=_snake_case )
if model_class.__name__ == "TFViTMAEModel":
__lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
__lowerCamelCase = 0
else:
__lowerCamelCase = after_outputs['''logits'''].numpy()
__lowerCamelCase = 0
__lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_snake_case , 1E-5 )
def _lowerCamelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(_snake_case )
__lowerCamelCase = self._prepare_for_class(_snake_case , _snake_case )
__lowerCamelCase = model(_snake_case , noise=_snake_case )
__lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_snake_case )
__lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
__lowerCamelCase = model_class.from_config(model.config )
__lowerCamelCase = new_model(_snake_case ) # Build model
new_model.set_weights(model.get_weights() )
__lowerCamelCase = new_model(_snake_case , noise=_snake_case )
self.assert_outputs_same(_snake_case , _snake_case )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(_snake_case )
def lowerCamelCase_ ( ):
__lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCamelCase ( self ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
np.random.seed(2 )
__lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(images=_snake_case , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
__lowerCamelCase = ViTMAEConfig()
__lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
__lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
__lowerCamelCase = model(**_snake_case , noise=_snake_case )
# verify the logits
__lowerCamelCase = tf.convert_to_tensor([1, 1_96, 7_68] )
self.assertEqual(outputs.logits.shape , _snake_case )
__lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , _snake_case , atol=1E-4 )
| 575 |
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return self.get_dummy_input()
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def _lowerCamelCase ( self , _snake_case=True , _snake_case=False , _snake_case=False , _snake_case=False , ):
"""simple docstring"""
__lowerCamelCase = 4
__lowerCamelCase = 32
__lowerCamelCase = (32, 32)
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = torch.device(_snake_case )
__lowerCamelCase = (batch_size, num_channels) + sizes
__lowerCamelCase = randn_tensor(_snake_case , generator=_snake_case , device=_snake_case )
__lowerCamelCase = {'''hidden_states''': hidden_states}
if include_temb:
__lowerCamelCase = 1_28
__lowerCamelCase = randn_tensor((batch_size, temb_channels) , generator=_snake_case , device=_snake_case )
if include_res_hidden_states_tuple:
__lowerCamelCase = torch.manual_seed(1 )
__lowerCamelCase = (randn_tensor(_snake_case , generator=_snake_case , device=_snake_case ),)
if include_encoder_hidden_states:
__lowerCamelCase = floats_tensor((batch_size, 32, 32) ).to(_snake_case )
if include_skip_sample:
__lowerCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=_snake_case , device=_snake_case )
return dummy_input
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 1_28,
}
if self.block_type == "up":
__lowerCamelCase = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self , _snake_case ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.prepare_init_args_and_inputs_for_common()
__lowerCamelCase = self.block_class(**_snake_case )
unet_block.to(_snake_case )
unet_block.eval()
with torch.no_grad():
__lowerCamelCase = unet_block(**_snake_case )
if isinstance(_snake_case , _snake_case ):
__lowerCamelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
__lowerCamelCase = output[0, -1, -3:, -3:]
__lowerCamelCase = torch.tensor(_snake_case ).to(_snake_case )
assert torch_all_close(output_slice.flatten() , _snake_case , atol=5E-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.prepare_init_args_and_inputs_for_common()
__lowerCamelCase = self.block_class(**_snake_case )
model.to(_snake_case )
model.train()
__lowerCamelCase = model(**_snake_case )
if isinstance(_snake_case , _snake_case ):
__lowerCamelCase = output[0]
__lowerCamelCase = torch.device(_snake_case )
__lowerCamelCase = randn_tensor(output.shape , device=_snake_case )
__lowerCamelCase = torch.nn.functional.mse_loss(_snake_case , _snake_case )
loss.backward()
| 575 | 1 |
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
UpperCamelCase__ : str = "facebook/wmt19-en-de"
UpperCamelCase__ : Optional[Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
UpperCamelCase__ : List[Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
UpperCamelCase__ : str = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
UpperCamelCase__ : List[Any] = tokenizer(['Making tiny model'], return_tensors='pt')
UpperCamelCase__ : Tuple = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
UpperCamelCase__ : Tuple = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 614 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = "▁"
_lowerCAmelCase : Optional[int] = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowerCAmelCase : Tuple = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
_lowerCAmelCase : Optional[Any] = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
_lowerCAmelCase : Any = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
def __init__( self , __snake_case , __snake_case , __snake_case=None , __snake_case=None , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<pad>" , __snake_case="<unk>" , __snake_case="m2m100" , __snake_case = None , __snake_case=8 , **__snake_case , ) -> None:
'''simple docstring'''
__a ={} if sp_model_kwargs is None else sp_model_kwargs
__a =language_codes
__a =FAIRSEQ_LANGUAGE_CODES[language_codes]
__a ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__a =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__snake_case )
for lang_code in fairseq_language_code
if self.get_lang_token(__snake_case ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__snake_case , tgt_lang=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , language_codes=__snake_case , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__snake_case , **__snake_case , )
__a =vocab_file
__a =load_json(__snake_case )
__a ={v: k for k, v in self.encoder.items()}
__a =spm_file
__a =load_spm(__snake_case , self.sp_model_kwargs )
__a =len(self.encoder )
__a ={
self.get_lang_token(__snake_case ): self.encoder_size + i for i, lang_code in enumerate(__snake_case )
}
__a ={lang_code: self.encoder_size + i for i, lang_code in enumerate(__snake_case )}
__a ={v: k for k, v in self.lang_token_to_id.items()}
__a =src_lang if src_lang is not None else 'en'
__a =tgt_lang
__a =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__a =num_madeup_words
@property
def __magic_name__ ( self ) -> int:
'''simple docstring'''
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __magic_name__ ( self ) -> str:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def __magic_name__ ( self , __snake_case ) -> None:
'''simple docstring'''
__a =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __magic_name__ ( self , __snake_case ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__snake_case , self.encoder[self.unk_token] )
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__snake_case , self.unk_token )
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
__a =[]
__a =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__snake_case ) + token
__a =[]
else:
current_sub_tokens.append(__snake_case )
out_string += self.sp_model.decode(__snake_case )
return out_string.strip()
def __magic_name__ ( self , __snake_case , __snake_case = None , __snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
__a =[1] * len(self.prefix_tokens )
__a =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
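    # Illustration (not from the original file): with prefix [__en__] and suffix
    # [</s>] (as set by the src-lang special-token logic below), a 3-token sequence
    # yields the special-tokens mask [1, 0, 0, 0, 1].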
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __magic_name__ ( self ) -> Dict:
'''simple docstring'''
__a ={self.convert_ids_to_tokens(__snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
__a =self.__dict__.copy()
__a =None
return state
def __setstate__( self , __snake_case ) -> None:
'''simple docstring'''
__a =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__a ={}
__a =load_spm(self.spm_file , self.sp_model_kwargs )
def __magic_name__ ( self , __snake_case , __snake_case = None ) -> Tuple[str]:
'''simple docstring'''
__a =Path(__snake_case )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__a =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__a =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , __snake_case )
if os.path.abspath(self.spm_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __snake_case )
elif not os.path.isfile(self.spm_file ):
with open(__snake_case , 'wb' ) as fi:
__a =self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (str(__snake_case ), str(__snake_case ))
def __magic_name__ ( self , __snake_case , __snake_case = "en" , __snake_case = None , __snake_case = "ro" , **__snake_case , ) -> BatchEncoding:
'''simple docstring'''
__a =src_lang
__a =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__snake_case , __snake_case , **__snake_case )
def __magic_name__ ( self , __snake_case , __snake_case , __snake_case , **__snake_case ) -> Optional[Any]:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__a =src_lang
__a =self(__snake_case , add_special_tokens=__snake_case , **__snake_case )
__a =self.get_lang_id(__snake_case )
__a =tgt_lang_id
return inputs
def __magic_name__ ( self ) -> Any:
'''simple docstring'''
self.set_src_lang_special_tokens(self.src_lang )
def __magic_name__ ( self ) -> Tuple:
'''simple docstring'''
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __magic_name__ ( self , __snake_case ) -> None:
'''simple docstring'''
__a =self.get_lang_token(__snake_case )
__a =self.lang_token_to_id[lang_token]
__a =[self.cur_lang_id]
__a =[self.eos_token_id]
def __magic_name__ ( self , __snake_case ) -> None:
'''simple docstring'''
__a =self.get_lang_token(__snake_case )
__a =self.lang_token_to_id[lang_token]
__a =[self.cur_lang_id]
__a =[self.eos_token_id]
def __magic_name__ ( self , __snake_case ) -> str:
'''simple docstring'''
return self.lang_code_to_token[lang]
def __magic_name__ ( self , __snake_case ) -> int:
'''simple docstring'''
__a =self.get_lang_token(__snake_case )
return self.lang_token_to_id[lang_token]
def UpperCamelCase_( _snake_case : str , _snake_case : Dict[str, Any] ):
"""simple docstring"""
__a =sentencepiece.SentencePieceProcessor(**_snake_case )
spm.Load(str(_snake_case ) )
return spm
def UpperCamelCase_( _snake_case : str ):
"""simple docstring"""
with open(_snake_case , 'r' ) as f:
return json.load(_snake_case )
def UpperCamelCase_( _snake_case : str , _snake_case : str ):
"""simple docstring"""
with open(_snake_case , 'w' ) as f:
json.dump(_snake_case , _snake_case , indent=2 )
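# A minimal usage sketch (illustrative; it assumes the public "facebook/m2m100_418M"
# checkpoint referenced in the maps above and a tokenizer class exported under a
# name such as M2M100Tokenizer, which is not defined in this file):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
#   # generation with an M2M100 model would pass forced_bos_token_id=tokenizer.get_lang_id("fr")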
| 242 | 0 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ):
'''simple docstring'''
UpperCAmelCase_ : Any = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
UpperCAmelCase_ : Optional[int] = AutoTokenizer.from_pretrained('xlm-roberta-base' )
UpperCAmelCase_ : Tuple = 'The dog is cute and lives in the garden house'
UpperCAmelCase_ : Dict = jnp.array([tokenizer.encode(snake_case_ )] )
UpperCAmelCase_ : str = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
UpperCAmelCase_ : List[str] = jnp.array(
[[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
UpperCAmelCase_ : str = model(snake_case_ )['last_hidden_state']
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )
| 703 |
'''simple docstring'''
def _lowerCamelCase ( sentence : str , ngram_size : int ):
    """
    Create all character n-grams of length ``ngram_size`` from ``sentence``.

    >>> _lowerCamelCase('hello', 3)
    ['hel', 'ell', 'llo']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 389 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for attribute in key.split("." ):
a__ : Optional[int] = getattr(lowerCamelCase , lowerCamelCase )
if weight_type is not None:
a__ : Any = getattr(lowerCamelCase , lowerCamelCase ).shape
else:
a__ : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
a__ : Union[str, Any] = value
elif weight_type == "weight_g":
a__ : Union[str, Any] = value
elif weight_type == "weight_v":
a__ : List[str] = value
elif weight_type == "bias":
a__ : Optional[Any] = value
else:
a__ : List[str] = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : List[Any] = []
a__ : str = fairseq_model.state_dict()
a__ : Optional[int] = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
a__ : int = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
a__ : Any = True
else:
for key, mapped_key in MAPPING.items():
a__ : Union[str, Any] = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
a__ : Optional[Any] = True
if "*" in mapped_key:
a__ : Optional[int] = name.split(lowerCamelCase )[0].split("." )[-2]
a__ : List[Any] = mapped_key.replace("*" , lowerCamelCase )
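                    # e.g. a fairseq name like "encoder.layers.3.self_attn.k_proj" yields
                    # the layer index "3", so the mapped key becomes
                    # "encoder.layers.3.attention.k_proj" (illustrative example).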
if "weight_g" in name:
a__ : Union[str, Any] = "weight_g"
elif "weight_v" in name:
a__ : int = "weight_v"
elif "weight" in name:
a__ : Dict = "weight"
elif "bias" in name:
a__ : Optional[int] = "bias"
else:
a__ : str = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Union[str, Any] = full_name.split("conv_layers." )[-1]
a__ : Union[str, Any] = name.split("." )
a__ : Optional[Any] = int(items[0] )
a__ : Dict = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
a__ : Union[str, Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
a__ : Dict = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
a__ : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
a__ : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(lowerCamelCase )
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : Dict = SEWConfig()
if is_finetuned:
a__ : Optional[Any] = model.wav_encoder.wav_model.cfg
else:
a__ : int = model.cfg
a__ : Any = fs_config.conv_bias
a__ : List[str] = eval(fs_config.conv_feature_layers )
a__ : Any = [x[0] for x in conv_layers]
a__ : Union[str, Any] = [x[1] for x in conv_layers]
a__ : Optional[Any] = [x[2] for x in conv_layers]
a__ : Any = "gelu"
a__ : Any = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
a__ : Tuple = 0.0
a__ : List[str] = fs_config.activation_fn.name
a__ : List[str] = fs_config.encoder_embed_dim
a__ : Tuple = 0.02
a__ : Optional[Any] = fs_config.encoder_ffn_embed_dim
a__ : Optional[int] = 1E-5
a__ : Dict = fs_config.encoder_layerdrop
a__ : Any = fs_config.encoder_attention_heads
a__ : Any = fs_config.conv_pos_groups
a__ : List[Any] = fs_config.conv_pos
a__ : Tuple = len(lowerCamelCase )
a__ : Optional[Any] = fs_config.encoder_layers
a__ : str = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
a__ : List[str] = model.cfg
a__ : List[str] = fs_config.final_dropout
a__ : int = fs_config.layerdrop
a__ : int = fs_config.activation_dropout
a__ : Tuple = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
a__ : Tuple = fs_config.attention_dropout
a__ : Tuple = fs_config.dropout_input
a__ : int = fs_config.dropout
a__ : int = fs_config.mask_channel_length
a__ : int = fs_config.mask_channel_prob
a__ : List[Any] = fs_config.mask_length
a__ : Optional[Any] = fs_config.mask_prob
a__ : Any = "Wav2Vec2FeatureExtractor"
a__ : List[Any] = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=True ):
if is_finetuned:
a__ , a__ , a__ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
a__ , a__ , a__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
a__ : int = SEWConfig.from_pretrained(lowerCamelCase )
else:
a__ : List[str] = convert_config(model[0] , lowerCamelCase )
a__ : str = model[0].eval()
a__ : List[str] = True if config.feat_extract_norm == "layer" else False
    a__ : List[Any] = Wav2Vec2FeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=lowerCamelCase , return_attention_mask=lowerCamelCase , )
if is_finetuned:
if dict_path:
a__ : Optional[int] = Dictionary.load(lowerCamelCase )
            # important: change bos & pad token id, since the CTC symbol is <pad>
            # and not <s> as in fairseq
a__ : Optional[int] = target_dict.pad_index
a__ : Union[str, Any] = target_dict.bos_index
a__ : Any = target_dict.pad_index
a__ : int = target_dict.bos_index
a__ : str = target_dict.eos_index
a__ : Union[str, Any] = len(target_dict.symbols )
a__ : str = os.path.join(lowerCamelCase , "vocab.json" )
if not os.path.isdir(lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(lowerCamelCase ) )
return
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase )
with open(lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , lowerCamelCase )
            a__ : Union[str, Any] = Wav2Vec2CTCTokenizer(
lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=lowerCamelCase , )
            a__ : Dict = Wav2Vec2Processor(feature_extractor=lowerCamelCase , tokenizer=lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
a__ : Optional[int] = SEWForCTC(lowerCamelCase )
else:
a__ : str = SEWModel(lowerCamelCase )
feature_extractor.save_pretrained(lowerCamelCase )
recursively_load_weights(lowerCamelCase , lowerCamelCase , lowerCamelCase )
hf_model.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 112 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : int = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
SCREAMING_SNAKE_CASE__ : int = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
SCREAMING_SNAKE_CASE__ : List[str] = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class __lowerCAmelCase ( _UpperCamelCase ):
_UpperCamelCase : Any = """whisper"""
_UpperCamelCase : Union[str, Any] = ["""past_key_values"""]
_UpperCamelCase : Union[str, Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , snake_case=51_865 , snake_case=80 , snake_case=6 , snake_case=4 , snake_case=6 , snake_case=4 , snake_case=1_536 , snake_case=1_536 , snake_case=0.0 , snake_case=0.0 , snake_case=50_257 , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=256 , snake_case=0.0 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=False , snake_case=1_500 , snake_case=448 , snake_case=50_256 , snake_case=50_256 , snake_case=50_256 , snake_case=None , snake_case=[220, 50_256] , snake_case=False , snake_case=256 , snake_case=False , snake_case=0.05 , snake_case=10 , snake_case=2 , snake_case=0.0 , snake_case=10 , snake_case=0 , snake_case=7 , **snake_case , ) -> Dict:
"""simple docstring"""
a__ : Optional[Any] = vocab_size
a__ : int = num_mel_bins
a__ : Dict = d_model
a__ : List[Any] = encoder_layers
a__ : List[Any] = encoder_attention_heads
a__ : Optional[int] = decoder_layers
a__ : int = decoder_attention_heads
a__ : Optional[Any] = decoder_ffn_dim
a__ : List[Any] = encoder_ffn_dim
a__ : int = dropout
a__ : Optional[int] = attention_dropout
a__ : Tuple = activation_dropout
a__ : Optional[Any] = activation_function
a__ : List[Any] = init_std
a__ : List[Any] = encoder_layerdrop
a__ : Dict = decoder_layerdrop
a__ : List[Any] = use_cache
a__ : Union[str, Any] = encoder_layers
a__ : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True
a__ : Tuple = max_source_positions
a__ : Optional[Any] = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
a__ : Optional[Any] = classifier_proj_size
a__ : int = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a__ : Tuple = apply_spec_augment
a__ : int = mask_time_prob
a__ : Optional[Any] = mask_time_length
a__ : List[str] = mask_time_min_masks
a__ : List[str] = mask_feature_prob
a__ : Dict = mask_feature_length
a__ : Any = mask_feature_min_masks
a__ : List[str] = median_filter_width
super().__init__(
pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , decoder_start_token_id=snake_case , suppress_tokens=snake_case , begin_suppress_tokens=snake_case , **snake_case , )
class __lowerCAmelCase ( _UpperCamelCase ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
a__ : Dict = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
a__ : Tuple = {0: "batch"}
else:
a__ : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction="inputs" )
return common_inputs
def _snake_case ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , snake_case = 22_050 , snake_case = 5.0 , snake_case = 220 , ) -> Mapping[str, Any]:
"""simple docstring"""
a__ : int = OrderedDict()
a__ : List[str] = OnnxConfig.generate_dummy_inputs(
self , preprocessor=preprocessor.feature_extractor , batch_size=snake_case , framework=snake_case , sampling_rate=snake_case , time_duration=snake_case , frequency=snake_case , )
a__ : Optional[int] = encoder_inputs["input_features"].shape[2]
a__ : str = encoder_sequence_length // 2 if self.use_past else seq_length
a__ : Optional[int] = super().generate_dummy_inputs(
preprocessor.tokenizer , snake_case , snake_case , snake_case , snake_case )
a__ : Any = encoder_inputs.pop("input_features" )
a__ : Dict = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
a__ : Tuple = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def _snake_case ( self ) -> float:
"""simple docstring"""
return 1E-3
| 112 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : str = logging.get_logger(__name__)
def lowercase_ ( _lowercase : str ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = DPTConfig()
if "large" in checkpoint_url:
UpperCAmelCase : List[str] = 10_24
UpperCAmelCase : Dict = 40_96
UpperCAmelCase : Union[str, Any] = 24
UpperCAmelCase : List[str] = 16
UpperCAmelCase : List[str] = [5, 11, 17, 23]
UpperCAmelCase : Any = [2_56, 5_12, 10_24, 10_24]
UpperCAmelCase : Tuple = (1, 3_84, 3_84)
if "ade" in checkpoint_url:
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : str = 1_50
UpperCAmelCase : Optional[int] = "huggingface/label-files"
UpperCAmelCase : Any = "ade20k-id2label.json"
UpperCAmelCase : Optional[Any] = json.load(open(cached_download(hf_hub_url(_lowercase , _lowercase , repo_type="dataset" ) ) , "r" ) )
UpperCAmelCase : Any = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : Tuple = idalabel
UpperCAmelCase : int = {v: k for k, v in idalabel.items()}
UpperCAmelCase : List[str] = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def lowercase_ ( _lowercase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase : Tuple = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase )
def lowercase_ ( _lowercase : int ):
'''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase : Union[str, Any] = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
UpperCAmelCase : Union[str, Any] = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
UpperCAmelCase : Tuple = name.replace("patch_embed" , "patch_embeddings" )
if "pos_embed" in name:
UpperCAmelCase : Optional[Any] = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
UpperCAmelCase : Optional[int] = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
UpperCAmelCase : Dict = name.replace("proj" , "projection" )
if "blocks" in name:
UpperCAmelCase : Any = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
UpperCAmelCase : int = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name:
UpperCAmelCase : str = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase : List[Any] = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
UpperCAmelCase : str = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
UpperCAmelCase : int = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
UpperCAmelCase : Any = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
UpperCAmelCase : Optional[int] = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
UpperCAmelCase : Any = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
UpperCAmelCase : Dict = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
UpperCAmelCase : Tuple = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
UpperCAmelCase : Tuple = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
UpperCAmelCase : Optional[Any] = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
UpperCAmelCase : Optional[Any] = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
UpperCAmelCase : int = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
UpperCAmelCase : Optional[Any] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
UpperCAmelCase : int = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase : Any = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase : Dict = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase : Tuple = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase : Any = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase : Optional[int] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase : int = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase : Optional[int] = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase : int = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase : str = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase : Tuple = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase : List[Any] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
UpperCAmelCase : Any = name.replace("pretrained" , "dpt" )
if "bn" in name:
UpperCAmelCase : Optional[Any] = name.replace("bn" , "batch_norm" )
if "head" in name:
UpperCAmelCase : List[str] = name.replace("head" , "head.head" )
if "encoder.norm" in name:
UpperCAmelCase : Optional[Any] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
UpperCAmelCase : List[str] = name.replace("auxlayer" , "auxiliary_head.head" )
return name
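# A worked example of the renaming above (added for illustration):
# "pretrained.model.blocks.0.attn.proj.weight" passes through the rules
# and comes out as "dpt.encoder.layer.0.attention.output.dense.weight".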
def read_in_q_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
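# Illustration (added comment): with hidden_size=1024 the fused qkv weight has shape (3072, 1024);
# rows [0:1024] become the query projection, [1024:2048] the key, and [2048:3072] the value.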
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    '''simple docstring'''
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
snake_case_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
snake_case_ : Tuple = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
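# Illustrative invocation (the script filename and local path are hypothetical, added for clarity):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large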
| 292 |
"""simple docstring"""
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    '''simple docstring'''
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
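# Usage sketch (added for illustration): the search walks inward from both ends.
# search([1, 3, 5, 7, 9], 7) returns index 3; a missing key such as search([1, 3, 5], 4) returns -1.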
if __name__ == "__main__":
import doctest
doctest.testmod()
| 292 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
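# The registration pattern these tests exercise, in brief (MyConfig and MyImageProcessor are
# hypothetical names used only for illustration):
#   AutoConfig.register("my-model", MyConfig)
#   AutoImageProcessor.register(MyConfig, MyImageProcessor)
#   processor = AutoImageProcessor.from_pretrained(saved_dir)  # resolves to MyImageProcessor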
| 87 |
"""simple docstring"""
from math import factorial
class Dual:
    '''simple docstring'''

    def __init__(self, real, rank):
        """simple docstring"""
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        """simple docstring"""
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        """simple docstring"""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        """simple docstring"""
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        """simple docstring"""
        return self + other * -1

    def __mul__(self, other):
        """simple docstring"""
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        """simple docstring"""
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        """simple docstring"""
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        """simple docstring"""
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()


def f(y):
    return y**2 * y**4
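# The call below computes the second derivative of f(y) = y**6 at y = 9,
# i.e. f''(y) = 30 * y**4, so 30 * 9**4 = 196830 (added comment for clarity).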
print(differentiate(f, 9, 2)) | 293 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "andreasmadsen/efficient_mlm_m0.40": (
        "https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"
    ),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    """simple docstring"""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
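# A minimal usage sketch (added, not part of the original file): the default
# configuration mirrors the roberta-base geometry.
#   config = RobertaPreLayerNormConfig()
#   assert config.hidden_size == 768 and config.num_hidden_layers == 12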
| 111 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
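# Illustrative launch command (added; the script filename is hypothetical, flags as defined in main() below):
#   accelerate launch local_sgd.py --local_sgd_steps 8 --gradient_accumulation_steps 1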
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 111 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")

        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")

            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")

        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 106 |
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]):
    '''simple docstring'''
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
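# Worked example (added for illustration): for x + 2y = 3 and 2x + 5y = 6 the main
# determinant is 1*5 - 2*2 = 1, so cramers_rule_2x2([1, 2, 3], [2, 5, 6]) returns (3.0, 0.0).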
| 573 | 0 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"

            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        '''simple docstring'''
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
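# A minimal usage sketch (added; assumes the public "microsoft/vq-diffusion-ithq" checkpoint):
#   from diffusers import VQDiffusionPipeline
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool").images[0]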
| 721 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """simple docstring"""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        '''simple docstring'''
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        '''simple docstring'''
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        '''simple docstring'''
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
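# Illustrative invocation (paths are placeholders, added for clarity):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin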
| 106 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" DistilBERT tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
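# Usage sketch (downloads the public hub checkpoint; the token ids shown come from the
# bert-base-uncased vocabulary that DistilBERT shares):
if __name__ == "__main__":
    tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
    type_ids = tok.create_token_type_ids_from_sequences([7592], [2088])  # "hello", "world"
    assert type_ids == [0, 0, 0, 1, 1]  # [CLS] A [SEP] -> 0s, B [SEP] -> 1s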
| 15 |
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
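# Worked example of the masking arithmetic exercised above (using the published
# facebook/vit-mae-base defaults of 224px images, 16px patches, mask_ratio 0.75):
#   num_patches    = (224 // 16) ** 2 = 196
#   visible tokens = ceil((1 - 0.75) * (196 + 1)) = 50   (the +1 is the [CLS] token)
# so during pre-training the encoder only ever sees 50 of the 197 tokens.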
| 196 | 0 |
"""Simple one-neuron forward-propagation training example."""
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of a float, or its derivative.

    >>> sigmoid_function(3.5)
    0.9706877692486436
    >>> sigmoid_function(3.5, True)
    -8.75
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight in [-1, 199], scaled from a random integer
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
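# Example session (illustrative; the random initial weight means exact intermediate
# values vary from run to run, but the output converges toward the expected value):
#   >>> import random; random.seed(0)
#   >>> 31 < forward_propagation(32, 450_000) < 33
#   True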
| 705 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
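# Shape arithmetic behind the interpolate_pos_encoding checks above (illustration):
# the tester's ViT (image_size=30, patch_size=2) sees (30 // 2) ** 2 + 1 = 226 tokens;
# cropping the input to 15px gives (15 // 2) ** 2 + 1 = 50 tokens, and interpolating
# the position embeddings onto the smaller 7x7 grid lets the same weights handle it.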
| 427 | 0 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4, hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, initializer_range=0.02, out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextVaConfig(num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextVaModel, "image-classification": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
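# Shape arithmetic behind the backbone assertions above. The stage strides used here
# (4, 8, 16, 32, the standard ConvNeXt reductions) are an assumption stated for the
# illustration, not read from this file:
#   image_size=32, "stage2" stride 8  ->  32 // 8 = 4 px per side
#   hidden_sizes=[10, 20, 30, 40], "stage2" -> hidden_sizes[1] = 20 channels
# which matches the [batch_size, 20, 4, 4] feature-map check in the tester.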
| 639 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
EXCLUDE_EXAMPLES = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class ExampleDifferenceTests(unittest.TestCase):
    """
    This TestCase checks that all of the `complete_*` scripts contain all of the
    information found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, special_strings
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")
    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clear_on_exit = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)
    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))
    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
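# Equivalent shell command to what run_command assembles above (paths illustrative):
#   accelerate launch --config_file /tmp/<tmpdir>/default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir out/
# write_basic_config emits the single-machine default_config.yml that every test reuses.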
| 674 | 0 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 616 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
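# Quick deterministic check of the helper above (illustration only):
_demo = floats_list((2, 3), rng=random.Random(0))
assert len(_demo) == 2 and len(_demo[0]) == 3
assert all(0.0 <= v < 1.0 for row in _demo for v in row)  # scale defaults to 1.0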
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
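    # What _check_zero_mean_unit_variance verifies, written out. The 1e-7 epsilon below
    # mirrors the usual per-utterance waveform normalization; it is an assumption about
    # the extractor's internals, not something read from this file:
    #   x_norm = (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + 1e-7)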
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = ["""longest""", """max_length""", """do_not_pad"""]
SCREAMING_SNAKE_CASE__ = [None, 1_6_0_0, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = feat_extract(__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ = range(8_0_0 , 1_4_0_0 , 2_0_0 )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in lengths]
SCREAMING_SNAKE_CASE__ = ["""longest""", """max_length""", """do_not_pad"""]
SCREAMING_SNAKE_CASE__ = [None, 1_6_0_0, None]
for max_length, padding in zip(__UpperCAmelCase , __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = feat_extract(__UpperCAmelCase , max_length=__UpperCAmelCase , padding=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = feat_extract(
__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=1_0_0_0 , padding="""max_length""" , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = feat_extract(
__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=1_0_0_0 , padding="""longest""" , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_0_0_0) )
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = feat_extract(
__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=2_0_0_0 , padding="""longest""" , return_tensors="""np""" )
SCREAMING_SNAKE_CASE__ = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_2_0_0) )
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
# Tests that all call wrap to encode_plus and batch_encode_plus
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
SCREAMING_SNAKE_CASE__ = [np.asarray(__UpperCAmelCase ) for speech_input in speech_inputs]
# Test feature size
SCREAMING_SNAKE_CASE__ = feature_extractor(audio_target=__UpperCAmelCase , padding=__UpperCAmelCase , return_tensors="""np""" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
SCREAMING_SNAKE_CASE__ = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_values
SCREAMING_SNAKE_CASE__ = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
# Test batched
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE__ = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
SCREAMING_SNAKE_CASE__ = np.asarray(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(__UpperCAmelCase , __UpperCAmelCase ):
self.assertTrue(np.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3 ) )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__UpperCAmelCase ) == len(__UpperCAmelCase ) for x, y in zip(__UpperCAmelCase , processed_features[input_name] ) ) )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
SCREAMING_SNAKE_CASE__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target(equal_length=__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
SCREAMING_SNAKE_CASE__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
SCREAMING_SNAKE_CASE__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**self.feat_extract_dict )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ = feat_extract.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
SCREAMING_SNAKE_CASE__ = feat_extract.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
SCREAMING_SNAKE_CASE__ = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ = [len(__UpperCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ = feat_extract.pad(__UpperCAmelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCAmelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = self.feat_extract_dict
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = self.feature_extraction_class(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.feat_extract_tester.prepare_inputs_for_target()
SCREAMING_SNAKE_CASE__ = [len(__UpperCAmelCase ) for x in speech_inputs]
SCREAMING_SNAKE_CASE__ = feat_extract.model_input_names[0]
SCREAMING_SNAKE_CASE__ = BatchFeature({input_name: speech_inputs} )
SCREAMING_SNAKE_CASE__ = min(__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = feat_extract.num_mel_bins # hack!
SCREAMING_SNAKE_CASE__ = feat_extract.pad(
__UpperCAmelCase , padding="""max_length""" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCAmelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def SCREAMING_SNAKE_CASE ( self : List[Any] , __UpperCAmelCase : int ) -> str:
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
SCREAMING_SNAKE_CASE__ = ds.sort("""id""" ).select(range(__UpperCAmelCase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
# fmt: off
SCREAMING_SNAKE_CASE__ = torch.tensor(
[2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] )
# fmt: on
SCREAMING_SNAKE_CASE__ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ = feature_extractor(__UpperCAmelCase , return_tensors="""pt""" ).input_values
        self.assertEqual(input_values.shape , (1, 9_3_6_8_0) )
self.assertTrue(torch.allclose(input_values[0, :3_0] , __UpperCAmelCase , atol=1e-6 ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
# fmt: off
SCREAMING_SNAKE_CASE__ = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
SCREAMING_SNAKE_CASE__ = self._load_datasamples(1 )
SCREAMING_SNAKE_CASE__ = SpeechTaFeatureExtractor()
SCREAMING_SNAKE_CASE__ = feature_extractor(audio_target=__UpperCAmelCase , return_tensors="""pt""" ).input_values
        self.assertEqual(input_values.shape , (1, 3_6_6, 8_0) )
self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , __UpperCAmelCase , atol=1e-4 ) )
| 616 | 1 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def a__ ( num ):
    """simple docstring"""
    if num <= 0:
        raise ValueError("""math domain error""" )
    return quad(integrand , 0 , inf , args=(num) )[0]
def integrand ( x , z ):
    """simple docstring"""
    return math.pow(x , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
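    # Illustrative spot-check (sketch, tolerance assumed): the integral above
    # reproduces factorials, so a__(5) should be close to Γ(5) = 4! = 24.
    assert abs(a__(5 ) - 24.0 ) < 1e-6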
| 627 |
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize ( example ):
    """simple docstring"""
    output = {}
    output["""input_ids"""] = tokenizer(example["""content"""] , truncation=False )["""input_ids"""]
    output["""ratio_char_token"""] = len(example["""content"""] ) / len(output["""input_ids"""] )
    return output
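# Illustrative note (sketch): "ratio_char_token" above is characters per token -- a
# cheap proxy for tokenizer fit; higher values mean the tokenizer compresses this
# corpus more efficiently.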
UpperCAmelCase : Tuple = HfArgumentParser(PretokenizationArguments)
UpperCAmelCase : Optional[Any] = parser.parse_args()
if args.num_workers is None:
UpperCAmelCase : List[Any] = multiprocessing.cpu_count()
UpperCAmelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_dir)
UpperCAmelCase : Optional[int] = time.time()
UpperCAmelCase : Any = load_dataset(args.dataset_name, split='train')
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")
UpperCAmelCase : List[str] = time.time()
UpperCAmelCase : List[str] = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")
UpperCAmelCase : List[Any] = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 627 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : List[str] = {
'configuration_megatron_bert': ['MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegatronBertConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MegatronBertForCausalLM',
'MegatronBertForMaskedLM',
'MegatronBertForMultipleChoice',
'MegatronBertForNextSentencePrediction',
'MegatronBertForPreTraining',
'MegatronBertForQuestionAnswering',
'MegatronBertForSequenceClassification',
'MegatronBertForTokenClassification',
'MegatronBertModel',
'MegatronBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
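# Illustrative behavior (sketch): with _LazyModule, `from <pkg> import MegatronBertModel`
# defers the heavy torch-backed import until the attribute is first resolved, so the
# configuration symbols stay importable even when torch is absent.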
| 164 | import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCAmelCase : Tuple = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = '▁'
__lowerCAmelCase : int = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__lowerCAmelCase : int = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__lowerCAmelCase : int = {'vinai/bartpho-syllable': 1024}
class lowerCamelCase ( __snake_case ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase = None , **__lowerCamelCase , ) -> None:
'''simple docstring'''
snake_case: Any = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
snake_case: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
snake_case: Tuple = vocab_file
snake_case: Dict = monolingual_vocab_file
snake_case: str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
snake_case: int = {}
snake_case: int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case: Optional[int] = cnt
cnt += 1
with open(__lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
for line in f.readlines():
snake_case: Optional[int] = line.strip().split()[0]
snake_case: List[Any] = len(self.fairseq_tokens_to_ids )
if str(__lowerCamelCase ) not in self.fairseq_tokens_to_ids:
snake_case: Tuple = len(self.fairseq_tokens_to_ids )
snake_case: str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ) -> int:
'''simple docstring'''
snake_case: Optional[Any] = self.__dict__.copy()
snake_case: List[str] = None
snake_case: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __lowerCamelCase ) -> Tuple:
'''simple docstring'''
snake_case: str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case: List[Any] = {}
snake_case: List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCAmelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case: Optional[int] = [self.cls_token_id]
snake_case: Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
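        # Illustrative layout (sketch): pairs are formatted BART-style as
        # <s> A </s></s> B </s>; single sequences as <s> A </s>.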
def lowerCAmelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def lowerCAmelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case: Tuple = [self.sep_token_id]
snake_case: str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
snake_case: Union[str, Any] = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCAmelCase_ ( self , __lowerCamelCase ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def lowerCAmelCase_ ( self , __lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCAmelCase_ ( self , __lowerCamelCase ) -> Dict:
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def lowerCAmelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
'''simple docstring'''
snake_case: Tuple = """""".join(__lowerCamelCase ).replace(__lowerCamelCase , """ """ ).strip()
return out_string
def lowerCAmelCase_ ( self , __lowerCamelCase , __lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
snake_case: Tuple = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
snake_case: Union[str, Any] = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""monolingual_vocab_file"""] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , """wb""" ) as fi:
snake_case: str = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
__lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"{str(__lowerCamelCase )} \n" )
return out_vocab_file, out_monolingual_vocab_file
| 164 | 1 |
'''simple docstring'''
from math import factorial
def solution ( n : int = 20):
    n = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k ) ) )
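# Illustrative check (sketch): a 2x2 lattice has C(4, 2) = 6 monotone paths, so
# solution(2) == 6; solution(20) yields the Project Euler 15 answer.
assert solution(2 ) == 6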
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
snake_case_ : List[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 212 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = tempfile.mkdtemp()
UpperCamelCase = BlipImageProcessor()
UpperCamelCase = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
UpperCamelCase = BlipProcessor(lowerCamelCase__ , lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **lowerCamelCase__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ).tokenizer
def UpperCAmelCase ( self , **lowerCamelCase__ ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ).image_processor
def UpperCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
        UpperCamelCase = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
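        # Illustrative note: np.moveaxis turns the channels-first (3, 30, 400) arrays
        # into the channels-last layout PIL's Image.fromarray expects.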
return image_inputs
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
UpperCamelCase = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = image_processor(lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase = processor(images=lowerCamelCase__ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
UpperCamelCase = '''lower newer'''
UpperCamelCase = processor(text=lowerCamelCase__ )
UpperCamelCase = tokenizer(lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
UpperCamelCase = '''lower newer'''
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase = processor.batch_decode(lowerCamelCase__ )
UpperCamelCase = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def UpperCAmelCase ( self ):
'''simple docstring'''
UpperCamelCase = self.get_image_processor()
UpperCamelCase = self.get_tokenizer()
UpperCamelCase = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
UpperCamelCase = '''lower newer'''
UpperCamelCase = self.prepare_image_inputs()
UpperCamelCase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 212 | 1 |
import contextlib
import os
import sqlitea
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
assert isinstance(_lowerCAmelCase , _lowerCAmelCase)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = tmp_path / "cache"
UpperCamelCase_ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCamelCase_ = SqlDatasetReader(
"dataset" , "sqlite:///" + sqlite_path , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase).read()
_check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase)
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = tmp_path / "cache"
UpperCamelCase_ = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCamelCase_ = features.copy() if features else default_expected_features
UpperCamelCase_ = (
Features({feature: Value(_lowerCAmelCase) for feature, dtype in features.items()}) if features is not None else None
)
UpperCamelCase_ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase).read()
_check_sql_dataset(_lowerCAmelCase , _lowerCAmelCase)
def _lowerCAmelCase (_lowerCAmelCase):
with contextlib.closing(sqlitea.connect(_lowerCAmelCase)) as con:
UpperCamelCase_ = con.cursor()
cur.execute("SELECT * FROM dataset")
for row in cur:
yield row
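# Illustrative note: iter_sql_file streams raw rows straight out of SQLite, letting the
# round-trip tests below compare the source and rewritten databases row by row.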
@require_sqlalchemy
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = tmp_path / "cache"
UpperCamelCase_ = os.path.join(_lowerCAmelCase , "tmp.sql")
UpperCamelCase_ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=_lowerCAmelCase).read()
SqlDatasetWriter(_lowerCAmelCase , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1).write()
UpperCamelCase_ = iter_sql_file(_lowerCAmelCase)
UpperCamelCase_ = iter_sql_file(_lowerCAmelCase)
for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase):
assert rowa == rowa
@require_sqlalchemy
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = tmp_path / "cache"
UpperCamelCase_ = os.path.join(_lowerCAmelCase , "tmp.sql")
UpperCamelCase_ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=_lowerCAmelCase).read()
SqlDatasetWriter(_lowerCAmelCase , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2).write()
UpperCamelCase_ = iter_sql_file(_lowerCAmelCase)
UpperCamelCase_ = iter_sql_file(_lowerCAmelCase)
for rowa, rowa in zip(_lowerCAmelCase , _lowerCAmelCase):
assert rowa == rowa
@require_sqlalchemy
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = tmp_path / "cache"
UpperCamelCase_ = os.path.join(_lowerCAmelCase , "tmp.sql")
UpperCamelCase_ = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=_lowerCAmelCase).read()
with pytest.raises(_lowerCAmelCase):
SqlDatasetWriter(_lowerCAmelCase , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0).write()
| 709 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = (CMStochasticIterativeScheduler,)
lowercase__ = 10
def _lowerCamelCase ( self , **snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**snake_case__ )
return config
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 10
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = self.scheduler_classes[0](**snake_case__ )
scheduler.set_timesteps(snake_case__ )
UpperCamelCase_ = scheduler.timesteps[0]
UpperCamelCase_ = scheduler.timesteps[1]
UpperCamelCase_ = self.dummy_sample
UpperCamelCase_ = 0.1 * sample
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
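        # Illustrative driver (sketch, names assumed): a full sampling loop repeats
        #     scaled = scheduler.scale_model_input(sample, t)
        #     sample = scheduler.step(model_output, t, sample).prev_sample
        # over scheduler.timesteps, which the full-loop tests below do step by step.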
def _lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = 1
scheduler.set_timesteps(snake_case__ )
UpperCamelCase_ = scheduler.timesteps
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case__ ):
# 1. scale model input
UpperCamelCase_ = scheduler.scale_model_input(snake_case__ , snake_case__ )
# 2. predict noise residual
UpperCamelCase_ = model(snake_case__ , snake_case__ )
# 3. predict previous sample x_t-1
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
UpperCamelCase_ = pred_prev_sample
UpperCamelCase_ = torch.sum(torch.abs(snake_case__ ) )
UpperCamelCase_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = [106, 0]
scheduler.set_timesteps(timesteps=snake_case__ )
UpperCamelCase_ = scheduler.timesteps
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase_ = scheduler.scale_model_input(snake_case__ , snake_case__ )
# 2. predict noise residual
UpperCamelCase_ = model(snake_case__ , snake_case__ )
# 3. predict previous sample x_t-1
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
UpperCamelCase_ = pred_prev_sample
UpperCamelCase_ = torch.sum(torch.abs(snake_case__ ) )
UpperCamelCase_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case__ , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = [39, 30, 12, 1, 0]
UpperCamelCase_ = len(snake_case__ )
with self.assertRaises(snake_case__ , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            snake_case__ , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=snake_case__ )
| 504 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 157 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
A__ : str = '''bert-generation'''
def __init__( self : Tuple , __lowerCamelCase : Optional[int]=5_0_3_5_8 , __lowerCamelCase : List[str]=1_0_2_4 , __lowerCamelCase : Optional[Any]=2_4 , __lowerCamelCase : Any=1_6 , __lowerCamelCase : Union[str, Any]=4_0_9_6 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : str=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : int=5_1_2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : Tuple=1E-12 , __lowerCamelCase : Any=0 , __lowerCamelCase : Union[str, Any]=2 , __lowerCamelCase : List[Any]=1 , __lowerCamelCase : Optional[Any]="absolute" , __lowerCamelCase : str=True , **__lowerCamelCase : List[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = position_embedding_type
_snake_case = use_cache
| 103 | 0 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace (s , old , new , occurrence ):
    '''simple docstring'''
    li = s.rsplit(old , occurrence )
    return new.join(li )
def count_parameters (state_dict ):
    '''simple docstring'''
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict (state_dict ):
    '''simple docstring'''
    upgrade = {}
    group_keys = ['group_1', 'group_2', 'group_3', 'group_4']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f'''{group_key}.''' , f'''{group_key}.group.''' )
        if "res_path" in key:
            key = key.replace('res_path.' , 'res_path.path.' )
        if key.endswith('.w' ):
            key = rreplace(key , '.w' , '.weight' , 1 )
        if key.endswith('.b' ):
            key = rreplace(key , '.b' , '.bias' , 1 )
        upgrade[key] = value.float()
    return upgrade
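# Illustrative mapping (sketch, key name hypothetical): the rewrites above turn a
# DALL-E key such as "blocks.group_1.res_path.0.w" into
# "blocks.group_1.group.res_path.path.0.weight".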
@torch.no_grad()
def convert_dalle_checkpoint (checkpoint_path , pytorch_dump_folder_path , config_path=None , save_checkpoint=True ):
    '''simple docstring'''
    from dall_e import Encoder
    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1E-3 )
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_UpperCamelCase : int = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 118 |
"""simple docstring"""
def catalan_numbers (upper_limit :int ):
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
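# Illustrative check (sketch): the sequence begins 1, 1, 2, 5, 14, 42.
assert catalan_numbers(5 ) == [1, 1, 2, 5, 14, 42]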
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
_UpperCamelCase : Union[str, Any] = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 118 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Dict =KandinskyInpaintPipeline
_UpperCAmelCase : str =["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
_UpperCAmelCase : Any =[
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
_UpperCAmelCase : Optional[Any] =[
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCAmelCase : Dict =False
@property
def _UpperCAmelCase ( self : str ):
return 32
@property
def _UpperCAmelCase ( self : Dict ):
return 32
@property
def _UpperCAmelCase ( self : Any ):
return self.time_input_dim
@property
def _UpperCAmelCase ( self : Optional[Any] ):
return self.time_input_dim * 4
@property
def _UpperCAmelCase ( self : Optional[Any] ):
return 1_00
@property
def _UpperCAmelCase ( self : Tuple ):
A_ = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def _UpperCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
A_ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
A_ = MultilingualCLIP(snake_case_ )
A_ = text_encoder.eval()
return text_encoder
@property
def _UpperCAmelCase ( self : Dict ):
torch.manual_seed(0 )
A_ = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
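        # Illustrative note: in_channels=9 is the inpainting concatenation --
        # 4 noisy latent channels + 4 masked-image latent channels + 1 mask channel.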
A_ = UNetaDConditionModel(**snake_case_ )
return model
@property
def _UpperCAmelCase ( self : List[Any] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _UpperCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
A_ = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCAmelCase ( self : Optional[Any] ):
A_ = self.dummy_text_encoder
A_ = self.dummy_tokenizer
A_ = self.dummy_unet
A_ = self.dummy_movq
A_ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , steps_offset=1 , prediction_type="epsilon" , thresholding=snake_case_ , )
A_ = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def _UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : str=0 ):
A_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
A_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(snake_case_ )
# create init_image
A_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
A_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A_ = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((2_56, 2_56) )
# create mask
A_ = np.ones((64, 64) , dtype=np.floataa )
A_ = 0
if str(snake_case_ ).startswith("mps" ):
A_ = torch.manual_seed(snake_case_ )
else:
A_ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
A_ = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def _UpperCAmelCase ( self : Optional[Any] ):
A_ = "cpu"
A_ = self.get_dummy_components()
A_ = self.pipeline_class(**snake_case_ )
A_ = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
A_ = pipe(**self.get_dummy_inputs(snake_case_ ) )
A_ = output.images
A_ = pipe(
**self.get_dummy_inputs(snake_case_ ) , return_dict=snake_case_ , )[0]
A_ = image[0, -3:, -3:, -1]
A_ = image_from_tuple[0, -3:, -3:, -1]
print(F"image.shape {image.shape}" )
assert image.shape == (1, 64, 64, 3)
A_ = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def _UpperCAmelCase ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self : Optional[Any] ):
A_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
A_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
A_ = np.ones((7_68, 7_68) , dtype=np.floataa )
A_ = 0
A_ = "a hat"
A_ = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(snake_case_ )
A_ = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
A_ = pipeline.to(snake_case_ )
pipeline.set_progress_bar_config(disable=snake_case_ )
A_ = torch.Generator(device="cpu" ).manual_seed(0 )
A_ , A_ = pipe_prior(
snake_case_ , generator=snake_case_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
A_ = pipeline(
snake_case_ , image=snake_case_ , mask_image=snake_case_ , image_embeds=snake_case_ , negative_image_embeds=snake_case_ , generator=snake_case_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
A_ = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(snake_case_ , snake_case_ )
| 452 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCamelCase__ = BarthezTokenizer
UpperCamelCase__ = BarthezTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
def _A( self ):
super().setUp()
lowercase =BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case_ )
lowercase =tokenizer
def _A( self ):
lowercase ='''<pad>'''
lowercase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def _A( self ):
lowercase =list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(snake_case_ ) , 10_11_22 )
def _A( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def _A( self ):
lowercase =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase =[0, 57, 30_18, 7_03_07, 91, 2]
lowercase =self.tokenizer(
snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , truncation=snake_case_ , return_tensors='''pt''' )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
lowercase =batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
def _A( self ):
if not self.test_rust_tokenizer:
return
lowercase =self.get_tokenizer()
lowercase =self.get_rust_tokenizer()
lowercase ='''I was born in 92000, and this is falsé.'''
lowercase =tokenizer.tokenize(snake_case_ )
lowercase =rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
lowercase =tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
lowercase =rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
lowercase =self.get_rust_tokenizer()
lowercase =tokenizer.encode(snake_case_ )
lowercase =rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
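        # Illustrative note: the sentencepiece-backed (slow) and tokenizers-backed
        # (fast) encoders must agree token-for-token; drift here signals a conversion
        # bug in the fast tokenizer files.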
@slow
def _A( self ):
# fmt: off
lowercase ={'''input_ids''': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
lowercase =[
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=snake_case_ , )
| 72 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
_a = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
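# Illustrative note (indices hypothetical): "*" entries in MAPPING are templates; during
# conversion the "*" is replaced by the fairseq layer index, e.g.
# "encoder.layers.*.attention.k_proj" becomes "encoder.layers.3.attention.k_proj".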
_a = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively ( hf_pointer, key, value, full_name, weight_type, is_finetuned ):
    """simple docstring"""
    for attribute in key.split('''.''' ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = 'lm_head'
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights ( fairseq_model, hf_model, is_finetuned ):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = full_name.split('''conv_layers.''' )[-1]
_UpperCamelCase = name.split('''.''' )
_UpperCamelCase = int(items[0] )
_UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_UpperCamelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
            )
            _UpperCamelCase = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
            )
)
_UpperCamelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCamelCase_ )
@torch.no_grad()
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=None, __snake_case=None, __snake_case=True ) -> Tuple:
"""simple docstring"""
if config_path is not None:
_UpperCamelCase = UniSpeechConfig.from_pretrained(lowerCamelCase_ )
else:
_UpperCamelCase = UniSpeechConfig()
if is_finetuned:
if dict_path:
_UpperCamelCase = Dictionary.load_from_json(lowerCamelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_UpperCamelCase = target_dict.pad_index
_UpperCamelCase = target_dict.bos_index
_UpperCamelCase = target_dict.eos_index
_UpperCamelCase = len(target_dict.symbols )
_UpperCamelCase = os.path.join(lowerCamelCase_, '''vocab.json''' )
if not os.path.isdir(lowerCamelCase_ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowerCamelCase_ ) )
return
os.makedirs(lowerCamelCase_, exist_ok=lowerCamelCase_ )
_UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_UpperCamelCase = 42
_UpperCamelCase = 43
with open(lowerCamelCase_, '''w''', encoding='''utf-8''' ) as vocab_handle:
json.dump(lowerCamelCase_, lowerCamelCase_ )
_UpperCamelCase = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase_, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=lowerCamelCase_, )
_UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
_UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_60_00, padding_value=0, do_normalize=lowerCamelCase_, return_attention_mask=lowerCamelCase_, )
_UpperCamelCase = WavaVecaProcessor(feature_extractor=lowerCamelCase_, tokenizer=lowerCamelCase_ )
processor.save_pretrained(lowerCamelCase_ )
_UpperCamelCase = UniSpeechForCTC(lowerCamelCase_ )
else:
_UpperCamelCase = UniSpeechForPreTraining(lowerCamelCase_ )
if is_finetuned:
_UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] ), '''w2v_path''': checkpoint_path} )
else:
_UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_UpperCamelCase = model[0].eval()
recursively_load_weights(lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ )
hf_unispeech.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_a = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
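# Example invocation (a sketch; the script filename and file paths are hypothetical):
#   python convert_unispeech_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./unispeech-hf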
| 706 |
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_a = logging.get_logger(__name__)
class _UpperCAmelCase( BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self , __a = True , __a = 1 / 2_55 , __a = True , __a = 8 , **__a , ) -> None:
'''simple docstring'''
super().__init__(**__a)
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_pad
_UpperCamelCase = pad_size
def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray:
'''simple docstring'''
return rescale(__a , scale=__a , data_format=__a , **__a)
    def UpperCAmelCase ( self , image , size , data_format = None) -> np.ndarray:
        '''Pad the bottom and right of the image symmetrically until both dimensions are multiples of `size`.'''
        old_height , old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=data_format)
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_pad if do_pad is not None else self.do_pad
_UpperCamelCase = pad_size if pad_size is not None else self.pad_size
_UpperCamelCase = make_list_of_images(__a)
if not valid_images(__a):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(__a) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images]
if do_pad:
_UpperCamelCase = [self.pad(__a , size=__a) for image in images]
_UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=__a , tensor_type=__a)
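# Intended usage (a sketch only; keyword names assume the standard BaseImageProcessor
# call semantics, and the input values are illustrative):
# import numpy as np
# processor = _UpperCAmelCase(do_rescale=True, do_pad=True, pad_size=8)
# image = np.random.randint(0, 256, (3, 21, 30), dtype=np.uint8)
# batch = processor(image, return_tensors="np")
# # height 21 -> 24 and width 30 -> 32 (next multiples of pad_size):
# print(batch["pixel_values"].shape)  # expected (1, 3, 24, 32)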
| 78 | 0 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCamelCase_ = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.0_1),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class __a ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def __A ( cls : int ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def __A ( cls : int ) -> Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id="""test-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-config-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-config""" )
except HTTPError:
pass
def __A ( self : Dict ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =BertConfig(
vocab_size=9_9 ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7 )
config.push_to_hub("""test-config""" ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ =BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase ,getattr(_UpperCamelCase ,_UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id="""test-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(_UpperCamelCase ,repo_id="""test-config""" ,push_to_hub=_UpperCamelCase ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ =BertConfig.from_pretrained(f"""{USER}/test-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase ,getattr(_UpperCamelCase ,_UpperCamelCase ) )
def __A ( self : str ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =BertConfig(
vocab_size=9_9 ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7 )
config.push_to_hub("""valid_org/test-config-org""" ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ =BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase ,getattr(_UpperCamelCase ,_UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token ,repo_id="""valid_org/test-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
_UpperCamelCase ,repo_id="""valid_org/test-config-org""" ,push_to_hub=_UpperCamelCase ,use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ =BertConfig.from_pretrained("""valid_org/test-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(_UpperCamelCase ,getattr(_UpperCamelCase ,_UpperCamelCase ) )
def __A ( self : List[Any] ) -> Tuple:
'''simple docstring'''
CustomConfig.register_for_auto_class()
SCREAMING_SNAKE_CASE__ =CustomConfig(attribute=4_2 )
config.push_to_hub("""test-dynamic-config""" ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{"""AutoConfig""": """custom_configuration.CustomConfig"""} )
SCREAMING_SNAKE_CASE__ =AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" ,trust_remote_code=_UpperCamelCase )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,"""CustomConfig""" )
self.assertEqual(new_config.attribute ,4_2 )
class __a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
SCREAMING_SNAKE_CASE__ =c.n_embd + 1 # int
SCREAMING_SNAKE_CASE__ =c.resid_pdrop + 1.0 # float
SCREAMING_SNAKE_CASE__ =not c.scale_attn_weights # bool
SCREAMING_SNAKE_CASE__ =c.summary_type + """foo""" # str
c.update_from_string(
f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
self.assertEqual(_UpperCamelCase ,c.n_embd ,"""mismatch for key: n_embd""" )
self.assertEqual(_UpperCamelCase ,c.resid_pdrop ,"""mismatch for key: resid_pdrop""" )
self.assertEqual(_UpperCamelCase ,c.scale_attn_weights ,"""mismatch for key: scale_attn_weights""" )
self.assertEqual(_UpperCamelCase ,c.summary_type ,"""mismatch for key: summary_type""" )
def __A ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =PretrainedConfig()
SCREAMING_SNAKE_CASE__ =[key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
_UpperCamelCase ,["""is_encoder_decoder""", """_name_or_path""", """_commit_hash""", """transformers_version"""] )
SCREAMING_SNAKE_CASE__ =[key for key, value in config_common_kwargs.items() if value == getattr(_UpperCamelCase ,_UpperCamelCase )]
if len(_UpperCamelCase ) > 0:
raise ValueError(
"""The following keys are set with the default values in"""
""" `test_configuration_common.config_common_kwargs` pick another value for them:"""
f""" {", ".join(_UpperCamelCase )}.""" )
def __A ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaises(_UpperCamelCase ):
# config is in subfolder, the following should not work without specifying the subfolder
SCREAMING_SNAKE_CASE__ =BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" )
SCREAMING_SNAKE_CASE__ =BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert-subfolder""" ,subfolder="""bert""" )
self.assertIsNotNone(_UpperCamelCase )
def __A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =mock.Mock()
SCREAMING_SNAKE_CASE__ =5_0_0
SCREAMING_SNAKE_CASE__ ={}
SCREAMING_SNAKE_CASE__ =HTTPError
SCREAMING_SNAKE_CASE__ ={}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ =BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" ,return_value=_UpperCamelCase ) as mock_head:
SCREAMING_SNAKE_CASE__ =BertConfig.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
def __A ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =BertConfig.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json""" )
def __A ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =AutoConfig.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE__ =["""config.4.0.0.json"""]
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(_UpperCamelCase )
SCREAMING_SNAKE_CASE__ =2
json.dump(configuration.to_dict() ,open(os.path.join(_UpperCamelCase ,"""config.4.0.0.json""" ) ,"""w""" ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
SCREAMING_SNAKE_CASE__ =AutoConfig.from_pretrained(_UpperCamelCase )
self.assertEqual(new_configuration.hidden_size ,2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
SCREAMING_SNAKE_CASE__ =["""config.42.0.0.json"""]
SCREAMING_SNAKE_CASE__ =7_6_8
configuration.save_pretrained(_UpperCamelCase )
shutil.move(os.path.join(_UpperCamelCase ,"""config.4.0.0.json""" ) ,os.path.join(_UpperCamelCase ,"""config.42.0.0.json""" ) )
SCREAMING_SNAKE_CASE__ =AutoConfig.from_pretrained(_UpperCamelCase )
self.assertEqual(new_configuration.hidden_size ,7_6_8 )
def __A ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ="""hf-internal-testing/test-two-configs"""
import transformers as new_transformers
SCREAMING_SNAKE_CASE__ ="""v4.0.0"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =new_transformers.models.auto.AutoConfig.from_pretrained(
_UpperCamelCase ,return_unused_kwargs=_UpperCamelCase )
self.assertEqual(new_configuration.hidden_size ,2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(_UpperCamelCase ,{} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
SCREAMING_SNAKE_CASE__ ="""v3.0.0"""
SCREAMING_SNAKE_CASE__ =old_transformers.models.auto.AutoConfig.from_pretrained(_UpperCamelCase )
self.assertEqual(old_configuration.hidden_size ,7_6_8 )
| 151 |
def gnome_sort ( lst ):
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
lowerCamelCase_ = input("Enter numbers separated by a comma:\n").strip()
lowerCamelCase_ = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
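# Quick sanity check (illustrative input):
# gnome_sort([5, 3, 8, 1, 2]) -> [1, 2, 3, 5, 8]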
| 151 | 1 |
def speed_of_sound_in_a_fluid ( density: float, bulk_modulus: float ) -> float:
    '''Newton-Laplace equation: c = sqrt(bulk_modulus / density).'''
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
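# Worked example (approximate textbook values, assumed for illustration):
# water: density ~ 998 kg/m^3, bulk_modulus ~ 2.15e9 Pa
# speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) ~ 1468 m/s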
| 700 |
_lowercase : Dict ='''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
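# Typical downstream usage of these exports (a minimal sketch; not part of this package file):
# from accelerate import Accelerator
# accelerator = Accelerator()
# model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)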
| 661 | 0 |
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
a__ : Tuple = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : bool , UpperCAmelCase__ : str = None , UpperCAmelCase__ : list = None ) -> int:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join("examples" , "by_feature" ) )
__SCREAMING_SNAKE_CASE = os.path.abspath("examples" )
for item in os.listdir(SCREAMING_SNAKE_CASE__ ):
if item not in EXCLUDE_EXAMPLES:
__SCREAMING_SNAKE_CASE = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if os.path.isfile(SCREAMING_SNAKE_CASE__ ) and ".py" in item_path:
with self.subTest(
tested_script=SCREAMING_SNAKE_CASE__ , feature_script=SCREAMING_SNAKE_CASE__ , tested_section="main()" if parser_only else "training_function()" , ):
__SCREAMING_SNAKE_CASE = compare_against_test(
os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE = "\n".join(SCREAMING_SNAKE_CASE__ )
if special_strings is not None:
for string in special_strings:
__SCREAMING_SNAKE_CASE = diff.replace(SCREAMING_SNAKE_CASE__ , "" )
self.assertEqual(SCREAMING_SNAKE_CASE__ , "" )
def UpperCAmelCase_ ( self : int ) -> Optional[int]:
self.one_complete_example("complete_nlp_example.py" , SCREAMING_SNAKE_CASE__ )
self.one_complete_example("complete_nlp_example.py" , SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
__SCREAMING_SNAKE_CASE = [
" " * 1_6 + "{\n\n",
" " * 2_0 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 2_0 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 2_0 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 2_0 + "\"epoch\": epoch,\n\n",
" " * 1_6 + "},\n\n",
" " * 1_6 + "step=epoch,\n",
" " * 1_2,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.one_complete_example("complete_cv_example.py" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"})
class UpperCamelCase_ ( TempDirTestCase):
"""simple docstring"""
snake_case__ : Optional[Any] = False
@classmethod
def UpperCAmelCase_ ( cls : Optional[int] ) -> str:
super().setUpClass()
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
__SCREAMING_SNAKE_CASE = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def UpperCAmelCase_ ( cls : List[str] ) -> List[Any]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def UpperCAmelCase_ ( self : Dict ) -> Dict:
__SCREAMING_SNAKE_CASE = F"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n """.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
__SCREAMING_SNAKE_CASE = F"""\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n """.split()
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = F"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}\n """.split()
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE__ )
self.assertNotIn("epoch 0:" , SCREAMING_SNAKE_CASE__ )
self.assertIn("epoch 1:" , SCREAMING_SNAKE_CASE__ )
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = F"""\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}\n """.split()
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE__ )
if torch.cuda.is_available():
__SCREAMING_SNAKE_CASE = torch.cuda.device_count()
else:
__SCREAMING_SNAKE_CASE = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , SCREAMING_SNAKE_CASE__ )
self.assertIn("epoch 1:" , SCREAMING_SNAKE_CASE__ )
else:
self.assertIn("epoch 0:" , SCREAMING_SNAKE_CASE__ )
self.assertIn("epoch 1:" , SCREAMING_SNAKE_CASE__ )
@slow
def UpperCAmelCase_ ( self : str ) -> Any:
__SCREAMING_SNAKE_CASE = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
__SCREAMING_SNAKE_CASE = run_command(self._launch_args + testargs , return_stdout=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE = re.findall("({.+})" , SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE = [r for r in results if "accuracy" in r][-1]
__SCREAMING_SNAKE_CASE = ast.literal_eval(SCREAMING_SNAKE_CASE__ )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def UpperCAmelCase_ ( self : str ) -> Tuple:
__SCREAMING_SNAKE_CASE = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def UpperCAmelCase_ ( self : Dict ) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdir:
__SCREAMING_SNAKE_CASE = F"""\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n """.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , "tracking" ) ) )
def UpperCAmelCase_ ( self : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 682 |
"""simple docstring"""
def bubble_sort ( list_data: list , length: int = 0 )-> list:
    '''Recursive bubble sort: each pass floats the largest remaining element to the end.'''
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
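# Quick sanity check (illustrative input):
# bubble_sort([0, 5, 2, 3, 2]) -> [0, 2, 2, 3, 5]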
| 510 | 0 |
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    '''Yield successive `size`-length tuples from `seq`.'''
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    '''Uppercase the message, drop non-letters and separate repeated letters with X.'''
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ''
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably so the table fits in a 5x5 grid (25 letters)
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext, 2):
        rowa, cola = divmod(table.index(chara), 5)
        rowb, colb = divmod(table.index(charb), 5)
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
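# Round-trip with the classic Wikipedia example (values assumed from that article):
# encode("Hide the gold in the tree stump", "playfair example")
#     -> 'BMODZBXDNABEKUDMUIXMMOUVIF'
# decode('BMODZBXDNABEKUDMUIXMMOUVIF', "playfair example")
#     -> 'HIDETHEGOLDINTHETREXESTUMP'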
| 645 | """simple docstring"""
from __future__ import annotations
def shear_stress ( stress : float , tangential_force : float , area : float , ):
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
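# Worked examples (illustrative values; exactly one of the three arguments must be 0):
# shear_stress(stress=25, tangential_force=100, area=0) -> ('area', 4.0)
# shear_stress(stress=0, tangential_force=1600, area=200) -> ('stress', 8.0)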
| 645 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCAmelCase :
'''simple docstring'''
@staticmethod
def __snake_case ( *_lowercase : Tuple , **_lowercase : Union[str, Any]) -> Union[str, Any]:
pass
@is_pipeline_test
@require_vision
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __snake_case ( self : Dict , _lowercase : List[Any] , _lowercase : Optional[Any] , _lowercase : Tuple) -> Any:
A_ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection')
A_ = [
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
]
return object_detector, examples
def __snake_case ( self : List[str] , _lowercase : Dict , _lowercase : List[str]) -> List[Any]:
A_ = object_detector(examples[0] , threshold=0.0)
A_ = len(_lowercase)
self.assertGreater(_lowercase , 0)
self.assertEqual(
_lowercase , [
{
'score': ANY(_lowercase),
'label': ANY(_lowercase),
'box': {'xmin': ANY(_lowercase), 'ymin': ANY(_lowercase), 'xmax': ANY(_lowercase), 'ymax': ANY(_lowercase)},
}
for i in range(_lowercase)
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF')
def __snake_case ( self : Optional[Any]) -> str:
pass
@require_torch
def __snake_case ( self : List[Any]) -> int:
A_ = pipeline(
'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection')
A_ = object_detector(
'./tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4) , [
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
] , )
A_ = object_detector(
[
{
'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
'candidate_labels': ['cat', 'remote', 'couch'],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4) , [
[
{'score': 0.72_35, 'label': 'cat', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.72_18, 'label': 'remote', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.71_84, 'label': 'couch', 'box': {'xmin': 204, 'ymin': 167, 'xmax': 232, 'ymax': 190}},
{'score': 0.67_48, 'label': 'remote', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_56, 'label': 'cat', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.66_14, 'label': 'couch', 'box': {'xmin': 571, 'ymin': 83, 'xmax': 598, 'ymax': 103}},
{'score': 0.64_56, 'label': 'remote', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
{'score': 0.6_42, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 274, 'xmax': 93, 'ymax': 297}},
{'score': 0.64_19, 'label': 'cat', 'box': {'xmin': 494, 'ymin': 105, 'xmax': 521, 'ymax': 127}},
]
] , )
@require_torch
@slow
def __snake_case ( self : Union[str, Any]) -> Optional[Any]:
A_ = pipeline('zero-shot-object-detection')
A_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
] , )
A_ = object_detector(
[
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
{
'image': 'http://images.cocodataset.org/val2017/000000039769.jpg',
'candidate_labels': ['cat', 'remote', 'couch'],
},
] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4) , [
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
[
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
{'score': 0.14_74, 'label': 'remote', 'box': {'xmin': 335, 'ymin': 74, 'xmax': 371, 'ymax': 187}},
{'score': 0.12_08, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 642, 'ymax': 476}},
],
] , )
@require_tf
@unittest.skip('Zero Shot Object Detection not implemented in TF')
def __snake_case ( self : int) -> List[str]:
pass
@require_torch
@slow
def __snake_case ( self : int) -> str:
A_ = 0.2
A_ = pipeline('zero-shot-object-detection')
A_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
{'score': 0.25_37, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 315, 'ymax': 472}},
] , )
@require_torch
@slow
def __snake_case ( self : int) -> Any:
A_ = 2
A_ = pipeline('zero-shot-object-detection')
A_ = object_detector(
'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4) , [
{'score': 0.28_68, 'label': 'cat', 'box': {'xmin': 324, 'ymin': 20, 'xmax': 640, 'ymax': 373}},
{'score': 0.2_77, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 177, 'ymax': 115}},
] , )
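# Application-level usage of the pipeline exercised above (a sketch; the image path
# and candidate labels are illustrative):
# detector = pipeline('zero-shot-object-detection')
# detector('cat_and_remote.jpg', candidate_labels=['cat', 'remote'], threshold=0.2)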
| 366 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> str:
# Construct model
if openai_config_file == "":
A_ = OpenAIGPTConfig()
else:
A_ = OpenAIGPTConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
A_ = OpenAIGPTModel(SCREAMING_SNAKE_CASE_ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
A_ = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
A_ = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() ,SCREAMING_SNAKE_CASE_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(SCREAMING_SNAKE_CASE_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
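# Example invocation (a sketch; the script filename and paths are hypothetical):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-tf \
#       --pytorch_dump_folder_path ./openai-gpt-hf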
| 366 | 1 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowerCamelCase_ : Optional[int] = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
lowerCamelCase_ : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def lowerCAmelCase( ):
__a = cn.convert_to_negative(__lowerCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCAmelCase( ):
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__lowerCamelCase , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def lowerCAmelCase( ):
__a = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def lowerCAmelCase( ):
__a = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__a = canny.canny(__lowerCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def lowerCAmelCase( ):
assert gg.gaussian_filter(__lowerCamelCase , 5 , sigma=0.9 ).all()
def lowerCAmelCase( ):
# laplace diagonals
__a = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
__a = conv.img_convolve(__lowerCamelCase , __lowerCamelCase ).astype(__lowerCamelCase )
assert res.any()
def lowerCAmelCase( ):
assert med.median_filter(__lowerCamelCase , 3 ).any()
def lowerCAmelCase( ):
__a , __a = sob.sobel_filter(__lowerCamelCase )
assert grad.any() and theta.any()
def lowerCAmelCase( ):
__a = sp.make_sepia(__lowerCamelCase , 20 )
assert sepia.all()
def lowerCAmelCase( __lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
__a = bs.Burkes(imread(__lowerCamelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def lowerCAmelCase( __lowerCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
__a = rs.NearestNeighbour(imread(__lowerCamelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def lowerCAmelCase( ):
__a = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
__a = imread(__lowerCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
__a = 0
__a = 0
__a = image[x_coordinate][y_coordinate]
__a = lbp.get_neighbors_pixel(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__a = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__a = lbp.local_binary_value(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
assert lbp_image.any()
| 246 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class a__ ( TaskTemplate ):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} )
    label_schema: ClassVar[Features] = Features(
        {
            'answers': Sequence(
                {
                    'text': Value('string' ),
                    'answer_start': Value('int32' ),
                } )
        } )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"
    @property
    def column_mapping ( self ) -> Dict[str, str]:
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
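# A dataset row matching this template looks like (illustrative values):
# {"question": "Who wrote it?", "context": "It was written by Ada.",
#  "answers": {"text": ["Ada"], "answer_start": [18]}}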
| 246 | 1 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Tuple:
UpperCamelCase__ : Tuple = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict:
UpperCamelCase__ , UpperCamelCase__ : str = emb.weight.shape
UpperCamelCase__ : Any = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
UpperCamelCase__ : Dict = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase="facebook/mbart-large-en-ro" , __lowerCAmelCase=False , __lowerCAmelCase=False ) -> Optional[Any]:
UpperCamelCase__ : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )["model"]
remove_ignore_keys_(__lowerCAmelCase )
UpperCamelCase__ : Optional[Any] = state_dict["encoder.embed_tokens.weight"].shape[0]
UpperCamelCase__ : Tuple = MBartConfig.from_pretrained(__lowerCAmelCase , vocab_size=__lowerCAmelCase )
if mbart_aa and finetuned:
UpperCamelCase__ : str = "relu"
UpperCamelCase__ : Optional[Any] = state_dict["decoder.embed_tokens.weight"]
UpperCamelCase__ : str = MBartForConditionalGeneration(__lowerCAmelCase )
model.model.load_state_dict(__lowerCAmelCase )
if finetuned:
UpperCamelCase__ : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
lowerCamelCase : List[str] =parser.parse_args()
lowerCamelCase : Any =convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 228 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : Dict =logging.get_logger(__name__)
lowerCamelCase : int =[
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
UpperCamelCase__ : Optional[int] = k.replace(__lowerCAmelCase , __lowerCAmelCase )
if k.startswith("encoder" ):
UpperCamelCase__ : Dict = k.replace(".attn" , ".self_attn" )
UpperCamelCase__ : Optional[int] = k.replace("norm1" , "self_attn_layer_norm" )
UpperCamelCase__ : Dict = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
UpperCamelCase__ : int = k.replace("norm1" , "self_attn_layer_norm" )
UpperCamelCase__ : List[Any] = k.replace("norm2" , "encoder_attn_layer_norm" )
UpperCamelCase__ : str = k.replace("norm3" , "final_layer_norm" )
return k
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> List[Any]:
UpperCamelCase__ : str = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
UpperCamelCase__ : Union[str, Any] = sd.pop(__lowerCAmelCase )
UpperCamelCase__ : Dict = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
UpperCamelCase__ : Union[str, Any] = v
lowerCamelCase : Any =['''START''']
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
UpperCamelCase__ : Union[str, Any] = torch.load(__lowerCAmelCase , map_location="cpu" )
UpperCamelCase__ : str = model["model"]
UpperCamelCase__ : int = BlenderbotConfig.from_json_file(__lowerCAmelCase )
UpperCamelCase__ : List[Any] = BlenderbotForConditionalGeneration(__lowerCAmelCase )
UpperCamelCase__ : Optional[Any] = m.model.state_dict().keys()
UpperCamelCase__ : str = []
UpperCamelCase__ : int = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
UpperCamelCase__ : Union[str, Any] = rename_state_dict_key(__lowerCAmelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
UpperCamelCase__ : Optional[int] = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__lowerCAmelCase )
m.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
m.half()
m.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
lowerCamelCase : Any =parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 228 | 1 |
"""simple docstring"""
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=[] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = size[0] - overlap_pixels * 2
__SCREAMING_SNAKE_CASE = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__SCREAMING_SNAKE_CASE = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
__SCREAMING_SNAKE_CASE = np.pad(UpperCAmelCase__ , mode='''linear_ramp''' , pad_width=UpperCAmelCase__ , end_values=0 )
if "l" in remove_borders:
__SCREAMING_SNAKE_CASE = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__SCREAMING_SNAKE_CASE = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__SCREAMING_SNAKE_CASE = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__SCREAMING_SNAKE_CASE = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Any:
return max(UpperCAmelCase__ , min(UpperCAmelCase__ , UpperCAmelCase__ ) )
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = list(UpperCAmelCase__ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__SCREAMING_SNAKE_CASE = clamp_rect(UpperCAmelCase__ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[Any]:
__SCREAMING_SNAKE_CASE = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(UpperCAmelCase__ , (original_slice, 0) )
return result
def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[int]:
__SCREAMING_SNAKE_CASE = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__SCREAMING_SNAKE_CASE = tile.crop(UpperCAmelCase__ )
return tile
def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple:
__SCREAMING_SNAKE_CASE = n % d
return n - divisor
class StableDiffusionTiledUpscalePipeline( StableDiffusionUpscalePipeline ):
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : AutoencoderKL , __SCREAMING_SNAKE_CASE : CLIPTextModel , __SCREAMING_SNAKE_CASE : CLIPTokenizer , __SCREAMING_SNAKE_CASE : UNetaDConditionModel , __SCREAMING_SNAKE_CASE : DDPMScheduler , __SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __SCREAMING_SNAKE_CASE : int = 3_50 , ) -> Any:
"""simple docstring"""
super().__init__(
vae=__SCREAMING_SNAKE_CASE , text_encoder=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , low_res_scheduler=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , max_noise_level=__SCREAMING_SNAKE_CASE , )
def _a ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[str] ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__SCREAMING_SNAKE_CASE = add_overlap_rect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , image.size )
__SCREAMING_SNAKE_CASE = image.crop(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__SCREAMING_SNAKE_CASE = translated_slice_x - (original_image_slice / 2)
__SCREAMING_SNAKE_CASE = max(0 , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = squeeze_tile(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = to_input.size
__SCREAMING_SNAKE_CASE = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__SCREAMING_SNAKE_CASE = super(__SCREAMING_SNAKE_CASE , self ).__call__(image=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ).images[0]
__SCREAMING_SNAKE_CASE = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__SCREAMING_SNAKE_CASE = unsqueeze_tile(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__SCREAMING_SNAKE_CASE = []
if x == 0:
remove_borders.append('''l''' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('''r''' )
if y == 0:
remove_borders.append('''t''' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('''b''' )
__SCREAMING_SNAKE_CASE = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__SCREAMING_SNAKE_CASE ) , mode='''L''' , )
final_image.paste(
__SCREAMING_SNAKE_CASE , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __SCREAMING_SNAKE_CASE )
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[PIL.Image.Image, List[PIL.Image.Image]], num_inference_steps: int = 75, guidance_scale: float = 9.0, noise_level: int = 50, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, tile_size: int = 128, tile_border: int = 32, original_image_slice: int = 32):
        """Upscale `image` tile by tile and paste the upscaled tiles back into one 4x image."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(original_image_slice, x, y, tile_size, tile_border, image, final_image, prompt=prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, noise_level=noise_level, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents)
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")
if __name__ == "__main__":
main()
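

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the pipeline): the tile-with-overlap
# geometry used above. `add_overlap_rect` grows each (x0, y0, x1, y1) crop by
# `tile_border` pixels so neighbouring tiles share context and seams can be
# blended away. This stand-alone version assumes simple clamping to the image
# bounds, inferred from how the rectangles are passed to `image.crop`.
def _overlap_rect_sketch(rect, overlap, image_size):
    x0, y0, x1, y1 = rect
    return (
        max(0, x0 - overlap),
        max(0, y0 - overlap),
        min(image_size[0], x1 + overlap),
        min(image_size[1], y1 + overlap),
    )
# e.g. a 128 px tile at the origin of a 512x512 image, with a 32 px border:
# _overlap_rect_sketch((0, 0, 128, 128), 32, (512, 512)) -> (0, 0, 160, 160)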
| 690 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_full_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
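

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite): the reproducibility
# pattern used above -- re-seeding `torch.manual_seed(0)` before each call so
# the `.images` output and the `return_dict=False` tuple output are generated
# from identical noise. The tolerance is illustrative.
def _outputs_match_sketch(pipe, steps=2, atol=1e-2):
    generator = torch.manual_seed(0)
    a = pipe(num_inference_steps=steps, generator=generator, output_type="numpy").images
    generator = torch.manual_seed(0)
    b = pipe(num_inference_steps=steps, generator=generator, output_type="numpy", return_dict=False)[0]
    return np.abs(a - b).max() < atol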
| 690 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"} | 32 |
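# ---------------------------------------------------------------------------
# Illustrative sketch for the LanguageModeling template above (not part of
# either module): `column_mapping` tells `datasets` which column to treat as
# the canonical "text" column. `prepare_for_task` follows the (since
# deprecated) datasets task API; the dataset id is hypothetical.
#
#   template = LanguageModeling(text_column="content")
#   template.column_mapping             # -> {"content": "text"}
#   ds = load_dataset("user/corpus", split="train")
#   ds = ds.prepare_for_task(template)  # renames "content" -> "text"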
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
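

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the CLI): the contract each command above
# follows -- a static `register_subcommand` wires an argparse sub-parser to a
# factory stored in `args.func`, so `main` can do `args.func(args).run()`
# without knowing the concrete command. Names here are hypothetical, not
# transformers classes.
def _subcommand_sketch():
    class HelloCommand:
        @staticmethod
        def register_subcommand(subparsers):
            sub = subparsers.add_parser("hello", help="print a greeting")
            sub.add_argument("--name", default="world")
            sub.set_defaults(func=lambda args: HelloCommand(args.name))

        def __init__(self, name):
            self.name = name

        def run(self):
            print(f"hello, {self.name}")

    demo = ArgumentParser("demo", usage="demo <command> [<args>]")
    HelloCommand.register_subcommand(demo.add_subparsers())
    args = demo.parse_args(["hello", "--name", "CLI"])
    args.func(args).run()  # prints "hello, CLI"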
| 460 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowercase__ : Optional[Any] = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
    except KeyError:
        raise ValueError("Task not found: %s" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir,
    )

    # Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}

    # Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

            results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
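

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the script): how `compute_metrics` reduces
# multiple-choice logits to accuracy -- argmax over the choice axis, then an
# element-wise comparison via `simple_accuracy`. The numbers are made up.
def _accuracy_sketch():
    logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # (batch, num_choices)
    labels = np.array([1, 0, 0])
    preds = np.argmax(logits, axis=1)      # -> [1, 0, 1]
    return simple_accuracy(preds, labels)  # 2 of 3 correct -> ~0.667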
| 707 |
"""simple docstring"""
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []
    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,), initializer="zeros", trainable=True, name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)
    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)
def _snake_case ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Tuple=True , lowercase_ : Tuple=False ):
snake_case_ : Optional[int] = 0
if self.n_clusters == 0:
snake_case_ : List[str] = self._logit(lowercase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
snake_case_ : List[Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowercase_ , logits=lowercase_ )
snake_case_ : List[Any] = tf.nn.log_softmax(lowercase_ , axis=-1 )
else:
snake_case_ : Optional[int] = shape_list(lowercase_ )
snake_case_ : Any = []
snake_case_ : List[str] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case_, snake_case_ : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case_ : Optional[Any] = (target >= l_idx) & (target < r_idx)
snake_case_ : Any = tf.where(lowercase_ )
snake_case_ : List[Any] = tf.boolean_mask(lowercase_ , lowercase_ ) - l_idx
if self.div_val == 1:
snake_case_ : Optional[int] = self.out_layers[0][0][l_idx:r_idx]
snake_case_ : List[Any] = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case_ : Dict = self.out_layers[i][0]
snake_case_ : str = self.out_layers[i][1]
if i == 0:
snake_case_ : Tuple = tf.concat([cur_W, self.cluster_weight] , 0 )
snake_case_ : List[str] = tf.concat([cur_b, self.cluster_bias] , 0 )
snake_case_ : List[Any] = self._logit(lowercase_ , lowercase_ , lowercase_ , self.out_projs[0] )
snake_case_ : Union[str, Any] = tf.nn.log_softmax(lowercase_ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case_ : int = tf.boolean_mask(lowercase_ , lowercase_ )
snake_case_ : str = self._gather_logprob(lowercase_ , lowercase_ )
else:
snake_case_ : Union[str, Any] = self._logit(lowercase_ , lowercase_ , lowercase_ , self.out_projs[i] )
snake_case_ : List[str] = tf.nn.log_softmax(lowercase_ )
snake_case_ : Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case_ : Union[str, Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowercase_ )
if target is not None:
snake_case_ : Optional[int] = tf.boolean_mask(lowercase_ , lowercase_ )
snake_case_ : Dict = tf.boolean_mask(lowercase_ , lowercase_ )
snake_case_ : Union[str, Any] = self._gather_logprob(lowercase_ , lowercase_ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowercase_ , -cur_logprob , shape_list(lowercase_ ) )
snake_case_ : Union[str, Any] = tf.concat(lowercase_ , axis=-1 )
if target is not None:
if return_mean:
snake_case_ : Dict = tf.reduce_mean(lowercase_ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowercase_ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowercase_ , name=self.name , aggregation='''mean''' if return_mean else '''''' )
return out
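

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the layer): how `cutoffs` partition the
# vocabulary. Ids below the first cutoff live in the head (plus one cluster
# logit per tail), and tail bucket i is embedded at d_embed // div_val**i.
# The sizes below are WikiText-103-style and purely illustrative.
def _cutoff_sketch():
    vocab_size, d_embed, div_val = 267735, 1024, 4
    cutoffs = [20000, 40000, 200000] + [vocab_size]
    cutoff_ends = [0] + cutoffs
    for i in range(len(cutoffs)):
        l_idx, r_idx = cutoff_ends[i], cutoff_ends[i + 1]
        d_emb_i = d_embed // (div_val**i)
        print(f"bucket {i}: ids [{l_idx}, {r_idx}) embedded at dim {d_emb_i}")
    # bucket 0: ids [0, 20000) at dim 1024 (head, plus 3 cluster logits)
    # bucket 1: ids [20000, 40000) at dim 256, and so on.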
| 485 | 0 |
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
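

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): what the
# normalization above does to a sample transcript. Kept behind a flag so the
# module's import behaviour is unchanged.
_DEMO_NORMALIZE = False
if _DEMO_NORMALIZE:
    print(normalize_text("Hello, World!\nThis — well… this is a TEST."))
    # -> "hello world this well this is a test"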
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
lowerCAmelCase : int = parser.parse_args()
main(args)
| 3 | '''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def snake_case__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
_UpperCamelCase : Optional[int] = self.model_class(**lowercase__ )
model.to(lowercase__ )
assert not model.is_gradient_checkpointing and model.training
_UpperCamelCase : List[Any] = model(**lowercase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_UpperCamelCase : Dict = torch.randn_like(lowercase__ )
_UpperCamelCase : Dict = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_UpperCamelCase : List[str] = self.model_class(**lowercase__ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowercase__ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_UpperCamelCase : int = model_a(**lowercase__ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_UpperCamelCase : Union[str, Any] = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
_UpperCamelCase : List[Any] = dict(model.named_parameters() )
_UpperCamelCase : Tuple = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def snake_case__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Tuple = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=lowercase__ )
self.assertIsNotNone(lowercase__ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowercase__ )
_UpperCamelCase : Tuple = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def snake_case__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
_UpperCamelCase : int = model.to(lowercase__ )
model.eval()
if torch_device == "mps":
_UpperCamelCase : Dict = torch.manual_seed(0 )
else:
_UpperCamelCase : Union[str, Any] = torch.Generator(device=lowercase__ ).manual_seed(0 )
_UpperCamelCase : Dict = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCamelCase : str = image.to(lowercase__ )
with torch.no_grad():
_UpperCamelCase : List[str] = model(lowercase__ , sample_posterior=lowercase__ , generator=lowercase__ ).sample
_UpperCamelCase : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_UpperCamelCase : Dict = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
_UpperCamelCase : Dict = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
_UpperCamelCase : Dict = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(lowercase__ , lowercase__ , rtol=1e-2 ) )
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def snake_case__ ( self : Tuple , lowercase__ : str , lowercase__ : Dict , lowercase__ : Any ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Any = self.get_sd_vae_model()
_UpperCamelCase : Union[str, Any] = self.get_sd_image(lowercase__ )
_UpperCamelCase : Optional[int] = self.get_generator(lowercase__ )
with torch.no_grad():
_UpperCamelCase : List[str] = model(lowercase__ , generator=lowercase__ , sample_posterior=lowercase__ ).sample
assert sample.shape == image.shape
_UpperCamelCase : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCamelCase : Union[str, Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowercase__ , lowercase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def snake_case__ ( self : List[Any] , lowercase__ : int , lowercase__ : Tuple ) ->Dict:
'''simple docstring'''
_UpperCamelCase : Dict = self.get_sd_vae_model(fpaa=lowercase__ )
_UpperCamelCase : Tuple = self.get_sd_image(lowercase__ , fpaa=lowercase__ )
_UpperCamelCase : int = self.get_generator(lowercase__ )
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model(lowercase__ , generator=lowercase__ , sample_posterior=lowercase__ ).sample
assert sample.shape == image.shape
_UpperCamelCase : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCamelCase : int = torch.tensor(lowercase__ )
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def snake_case__ ( self : int , lowercase__ : int , lowercase__ : Any , lowercase__ : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCamelCase : Tuple = self.get_sd_vae_model()
_UpperCamelCase : str = self.get_sd_image(lowercase__ )
with torch.no_grad():
_UpperCamelCase : List[Any] = model(lowercase__ ).sample
assert sample.shape == image.shape
_UpperCamelCase : int = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCamelCase : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowercase__ , lowercase__ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def snake_case__ ( self : Optional[int] , lowercase__ : Tuple , lowercase__ : Dict ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : Tuple = self.get_sd_vae_model()
_UpperCamelCase : Any = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
_UpperCamelCase : Dict = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_UpperCamelCase : str = sample[-1, -2:, :2, -2:].flatten().cpu()
_UpperCamelCase : Any = torch.tensor(lowercase__ )
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def snake_case__ ( self : Union[str, Any] , lowercase__ : List[str] , lowercase__ : str ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.get_sd_vae_model(fpaa=lowercase__ )
_UpperCamelCase : Tuple = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) , fpaa=lowercase__ )
with torch.no_grad():
_UpperCamelCase : int = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
_UpperCamelCase : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCamelCase : Dict = torch.tensor(lowercase__ )
assert torch_all_close(lowercase__ , lowercase__ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def snake_case__ ( self : str , lowercase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase : str = self.get_sd_vae_model(fpaa=lowercase__ )
_UpperCamelCase : List[str] = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) , fpaa=lowercase__ )
with torch.no_grad():
_UpperCamelCase : int = model.decode(lowercase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCamelCase : Tuple = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def snake_case__ ( self : Optional[int] , lowercase__ : int ) ->str:
'''simple docstring'''
_UpperCamelCase : Optional[int] = self.get_sd_vae_model()
_UpperCamelCase : Any = self.get_sd_image(lowercase__ , shape=(3, 4, 64, 64) )
with torch.no_grad():
_UpperCamelCase : List[Any] = model.decode(lowercase__ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCamelCase : Optional[int] = model.decode(lowercase__ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase__ , lowercase__ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def snake_case__ ( self : Union[str, Any] , lowercase__ : List[str] , lowercase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.get_sd_vae_model()
_UpperCamelCase : Any = self.get_sd_image(lowercase__ )
_UpperCamelCase : Optional[int] = self.get_generator(lowercase__ )
with torch.no_grad():
_UpperCamelCase : Union[str, Any] = model.encode(lowercase__ ).latent_dist
_UpperCamelCase : Dict = dist.sample(generator=lowercase__ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_UpperCamelCase : List[Any] = sample[0, -1, -3:, -3:].flatten().cpu()
_UpperCamelCase : int = torch.tensor(lowercase__ )
_UpperCamelCase : Union[str, Any] = 3e-3 if torch_device != "mps" else 1e-2
assert torch_all_close(lowercase__ , lowercase__ , atol=lowercase__ )
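

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the tests): the spatial compression these
# tests exercise. With the dummy config above (two down blocks), the VAE
# downsamples once, so 32x32 pixels map to 16x16 latents with 4 channels; the
# pretrained SD VAE used in the slow tests has four blocks and compresses 8x
# (512x512 -> 64x64).
def _vae_shape_sketch():
    vae = AutoencoderKL(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=4,
    )
    x = torch.randn(1, 3, 32, 32)
    z = vae.encode(x).latent_dist.sample(generator=torch.manual_seed(0))
    assert z.shape == (1, 4, 16, 16)
    assert vae.decode(z).sample.shape == (1, 3, 32, 32)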
| 435 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def _SCREAMING_SNAKE_CASE ( self : int ):
super().setUp()
# fmt: off
lowerCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
lowerCAmelCase__ = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case__ ) + """\n""" )
def _SCREAMING_SNAKE_CASE ( self : Tuple , **snake_case__ : Dict ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] , snake_case__ : Any ):
lowerCAmelCase__ = """tester"""
lowerCAmelCase__ = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
pass
def _SCREAMING_SNAKE_CASE ( self : int ):
lowerCAmelCase__ = self.get_tokenizers(do_lower_case=snake_case__ )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
lowerCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=snake_case__ )
self.assertEqual(len(snake_case__ ) , 1 )
lowerCAmelCase__ = tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ )
self.assertTrue(special_token not in decoded )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
lowerCAmelCase__ , lowerCAmelCase__ = self.get_input_output_texts(snake_case__ )
lowerCAmelCase__ = tokenizer.tokenize(snake_case__ )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(snake_case__ )
lowerCAmelCase__ = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ )
self.assertListEqual(snake_case__ , snake_case__ )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(snake_case__ )
self.assertNotEqual(len(snake_case__ ) , 0 )
lowerCAmelCase__ = tokenizer.decode(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
self.assertEqual(text_a.replace(""" """ , """""" ) , snake_case__ )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
pass
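

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the tests): the character-level round trip
# the MGP-STR vocabulary above supports -- a plain char-to-id JSON-style
# mapping, encoded and decoded one character at a time.
def _char_vocab_sketch():
    vocab = ["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz")
    vocab_tokens = dict(zip(vocab, range(len(vocab))))
    ids = [vocab_tokens[c] for c in "tester"]
    inv = {i: c for c, i in vocab_tokens.items()}
    return "".join(inv[i] for i in ids)  # -> "tester"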
| 701 | """simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class a_ ( unittest.TestCase ):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
def _SCREAMING_SNAKE_CASE ( self : str , snake_case__ : Optional[Any] ):
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase__ = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase__ = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase__ = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase__ = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="""py36""" , )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , snake_case__ : str ):
TrainingJobAnalytics(snake_case__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : List[str] ):
# create estimator
lowerCAmelCase__ = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , snake_case__ )
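

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test): the model-parallel wiring built
# in `create_estimator` above. `mpi.processes_per_host` sets ranks per
# instance, while `smdistributed.modelparallel.parameters` sets the pipeline
# and partition layout (values mirror the test).
_SMP_DISTRIBUTION_SKETCH = {
    "smdistributed": {
        "modelparallel": {
            "enabled": True,
            "parameters": {
                "microbatches": 4,              # pipeline micro-batches per step
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,                # model split across 4 partitions
                "ddp": True,                    # data parallel across remaining ranks
            },
        }
    },
    "mpi": {"enabled": True, "processes_per_host": 8},
}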
| 674 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : List[str] = '''▁'''
__A : Union[str, Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
__A : List[Any] = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
__A : str = {
'''xlm-roberta-base''': 512,
'''xlm-roberta-large''': 512,
'''xlm-roberta-large-finetuned-conll02-dutch''': 512,
'''xlm-roberta-large-finetuned-conll02-spanish''': 512,
'''xlm-roberta-large-finetuned-conll03-english''': 512,
'''xlm-roberta-large-finetuned-conll03-german''': 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Dict , A : Any , A : List[str]="<s>" , A : int="</s>" , A : Any="</s>" , A : Optional[int]="<s>" , A : Union[str, Any]="<unk>" , A : Union[str, Any]="<pad>" , A : int="<mask>" , A : Optional[Dict[str, Any]] = None , **A : Optional[Any] , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowercase_ : Any = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
lowercase_ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowercase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
lowercase_ : Optional[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase_ : Any = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : List[Any] = 1
lowercase_ : Tuple = len(self.sp_model ) + self.fairseq_offset
lowercase_ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[Any] ) -> int:
lowercase_ : Any = self.__dict__.copy()
lowercase_ : Dict = None
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Tuple , A : Any ) -> List[Any]:
lowercase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase_ : str = {}
lowercase_ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A ( self : List[str] , A : List[int] , A : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase_ : str = [self.cls_token_id]
lowercase_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A ( self : Union[str, Any] , A : List[int] , A : Optional[List[int]] = None , A : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def A ( self : List[str] , A : List[int] , A : Optional[List[int]] = None ) -> List[int]:
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A ( self : Any ) -> Tuple:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def A ( self : Optional[int] ) -> str:
lowercase_ : int = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A ( self : Union[str, Any] , A : str ) -> List[str]:
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def A ( self : Optional[int] , A : Tuple ) -> Tuple:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : List[Any] = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A ( self : Union[str, Any] , A : List[Any] ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A ( self : str , A : Tuple ) -> Any:
lowercase_ : Union[str, Any] = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def A ( self : Union[str, Any] , A : str , A : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : List[Any] = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowercase_ : int = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
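

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the tokenizer): the fairseq/SentencePiece
# id arithmetic implemented above. Four reserved ids up front, every
# SentencePiece id shifted by fairseq_offset=1, SentencePiece id 0 (unknown)
# mapped to <unk>, and <mask> appended after the shifted vocabulary.
# `sp_piece_to_id` stands in for `sp_model.PieceToId`.
def _fairseq_id_sketch(token, sp_piece_to_id, sp_vocab_size):
    reserved = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    offset = 1
    if token == "<mask>":
        return sp_vocab_size + offset
    if token in reserved:
        return reserved[token]
    spm_id = sp_piece_to_id(token)  # 0 means "unknown piece" in SentencePiece
    return spm_id + offset if spm_id else reserved["<unk>"]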
| 231 |
"""simple docstring"""
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]

        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)
if __name__ == "__main__":
import doctest
doctest.testmod()
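

# ---------------------------------------------------------------------------
# Usage of `word_break` above: the memoised `is_breakable` gives at most
# O(n^2) trie probes over the string instead of re-solving suffixes.
def _word_break_demo():
    assert word_break("applepenapple", ["apple", "pen"]) is True
    assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False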
| 549 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(self, vocab_size=250002, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, initializer_factor=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, project_dim=768, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(self, hidden_size=768, intermediate_size=3072, projection_dim=512, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=224, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
_A : int = """altclip"""
_A : int = True
def __init__(self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=768 , lowerCAmelCase_=2.6592 , **lowerCAmelCase_ ):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
A_ : Any = kwargs.pop("""text_config_dict""" , lowerCAmelCase_ )
A_ : int = kwargs.pop("""vision_config_dict""" , lowerCAmelCase_ )
super().__init__(**lowerCAmelCase_ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)
        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overridden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.")
        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0
    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
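# Usage sketch (added for illustration; assumes this module is importable as in the
# transformers AltCLIP port): compose the composite config from explicit sub-configs
# and round-trip it through a dict.
#
#   text_config = AltCLIPTextConfig(hidden_size=1024, project_dim=768)
#   vision_config = AltCLIPVisionConfig(hidden_size=768, projection_dim=512)
#   config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)
#   assert config.to_dict()["model_type"] == "altclip"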
| 700 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")
    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )
    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
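# Usage sketch (illustrative; assumes the PipelineTool base class drives
# encode -> forward -> decode from __call__, as in the transformers agents API):
#
#   classifier = TextClassificationTool()
#   label = classifier("This movie was a masterpiece.", labels=["positive", "negative"])
#   # -> "positive", the label whose entailment logit scores highest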
| 480 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )
    @property
    def atol_for_validation(self):
        return 1e-4
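# Quick sketch (illustrative): the derived hidden_size is the channel dimension
# after the last stage, i.e. embed_dim * 2**(num_stages - 1) = 96 * 8 = 768 for
# the default (tiny-style) depths above.
#
#   config = SwinConfig()
#   assert config.hidden_size == int(config.embed_dim * 2 ** (len(config.depths) - 1))
#   onnx_config = SwinOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict([('pixel_values', {0: 'batch', ...})])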
| 93 |
"""simple docstring"""
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, )
        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", )
        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, )
        torch.manual_seed(0)
        vae = AutoencoderKL()
        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 93 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")
    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()
    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name), organization="nandwalritik", commit_message="Add model", )
if __name__ == "__main__":
lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCamelCase : List[str] = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
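# Example invocation (illustrative; the checkpoint name is the argparse default
# above, the output path is a placeholder). Equivalent to the argparse entry point:
#
#   convert_swinv2_checkpoint("swinv2_tiny_patch4_window8_256", "./swinv2-tiny-patch4-window8-256")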
| 684 |
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place with alternating backward/forward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 684 | 1 |
"""simple docstring"""
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 437 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 586 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 713 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)
            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
| 190 | 0 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
a_ = """facebook/wmt19-en-de"""
a_ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
a_ = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test
a_ = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
a_ = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
a_ = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
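# Usage sketch (illustrative; the repo id is taken from the comment near the top
# of this script):
#
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-de")
#   tokenizer = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-de")
#   batch = tokenizer(["Making tiny model"], return_tensors="pt")
#   outputs = model(**batch)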
| 175 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class GeneratorDatasetInputStream(AbstractDatasetInputStream):
    def __init__( self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, num_proc: Optional[int] = None, gen_kwargs: Optional[dict] = None, **kwargs, ):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory )
        return dataset
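# Usage sketch (illustrative; this reader is what backs Dataset.from_generator):
#
#   def gen():
#       for i in range(3):
#           yield {"id": i}
#
#   ds = GeneratorDatasetInputStream(generator=gen).read()
#   assert len(ds) == 3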
| 175 | 1 |
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 666 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all digits before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                } ), reference_urls=[], )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False, ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 666 | 1 |
import numpy as np
SQUARE = [
["a", "b", "c", "d", "e"],
["f", "g", "h", "i", "k"],
["l", "m", "n", "o", "p"],
["q", "r", "s", "t", "u"],
["v", "w", "x", "y", "z"],
]
class BifidCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)
    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # locate the letter in the square and return its 1-based (row, column) pair
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes
    def numbers_to_letter(self, index1: int, index2: int) -> str:
        # map a 1-based (row, column) pair back to the letter in the square
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message
    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
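# Round-trip sketch (illustrative): encode writes each letter's (row, column)
# coordinates into a 2 x n grid, flattens it, and reads the digits back in pairs,
# so decode(encode(m)) returns m with spaces removed and "j" folded into "i".
#
#   cipher = BifidCipher()
#   assert cipher.decode(cipher.encode("testmessage")) == "testmessage"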
| 130 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True, ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8 ) )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]
        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_batch_feature(self):
        pass
    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ), )
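# Note (illustrative): the four-channel variant above relies on do_convert_rgb,
# which collapses RGBA inputs to three channels before resize/normalize — hence
# expected_encoded_image_num_channels = 3 while the tester feeds 4-channel images.
#
#   processor = ChineseCLIPImageProcessor(do_convert_rgb=True)
#   pixel_values = processor(images=rgba_image, return_tensors="pt").pixel_values
#   # pixel_values.shape[1] == 3 regardless of the input's alpha channel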
| 310 | 0 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
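# Example invocation (illustrative; all paths are placeholders), bypassing argparse:
#
#   convert_tf_checkpoint_to_pytorch(
#       task="SQA",
#       reset_position_index_per_cell=True,
#       tf_checkpoint_path="/path/to/model.ckpt",
#       tapas_config_file="/path/to/tapas_config.json",
#       pytorch_dump_path="/path/to/output",
#   )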
| 711 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase : List[Any] = """▁"""
lowerCAmelCase : Union[str, Any] = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "[MASK]")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66]  # noqa: E231
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False )
        config = BigBirdConfig(attention_type="original_full")
        model = BigBirdModel(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def a__ ( self ) -> Optional[Any]:
lowercase : Tuple = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
lowercase : Any = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def a__ ( self ) -> Optional[int]:
# fmt: off
lowercase : Dict = {"input_ids": [[6_5, 3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4, 6_6], [6_5, 4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [6_5, 4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 6_6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a_ , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
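# Standalone sketch (illustrative; requires downloading the checkpoint from the Hub):
# the same round trip the integration tests above encode by hand.
#
#   from transformers import BigBirdTokenizer
#
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tok.encode("Hello World!")        # [65, 18536, 2260, 101, 66], as asserted above
#   print(tok.convert_ids_to_tokens(ids))   # 65/66 are the [CLS]/[SEP] specials added by encode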
| 425 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A =logging.get_logger(__name__)
class TimmBackboneConfig( PretrainedConfig ):
    model_type = '''timm_backbone'''
    def __init__( self , backbone=None , num_channels=3 , features_only=True , use_pretrained_backbone=True , out_indices=None , **kwargs , ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,) | 407 |
'''simple docstring'''
def _modexpt( base: int , exponent: int , modulo_value: int ) -> int:
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
def solution( base: int = 1_7_7_7 , height: int = 1_8_5_5 , digits: int = 8 ) -> int:
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 1_0**digits )
    return result
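# Added cross-check (illustrative, not part of the original script): _modexpt implements
# recursive square-and-multiply, so it must agree with Python's built-in three-argument pow().
assert _modexpt(3 , 10 , 1_0**8 ) == pow(3 , 10 , 1_0**8 )
assert _modexpt(1_7_7_7 , 1_8_5_5 , 1_0**8 ) == pow(1_7_7_7 , 1_8_5_5 , 1_0**8 )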
if __name__ == "__main__":
print(f"""{solution() = }""") | 407 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""")
def onnx_export( model , model_args: tuple , output_path: Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ) -> None:
    '''simple docstring'''
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path: str , output_path: str , opset: int , fp16: bool = False ) -> None:
    '''simple docstring'''
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = """cuda"""
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
    else:
        device = """cpu"""
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + """/vae""" )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
            """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
        } , opset=opset , )
del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=1_4,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("""SD: Done: ONNX""")
| 495 |
def solution( n: int = 10_00 ) -> int:
    '''simple docstring'''
    return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
if __name__ == "__main__":
print(solution())
| 495 | 1 |
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest ( TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _snake_case ( self : Dict ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ : Optional[Any] = XGLMTokenizer(lowercase_ , keep_accents=lowercase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self : Optional[int] ):
snake_case_ : Optional[int] = '''<pad>'''
snake_case_ : str = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_ ) , lowercase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_ ) , lowercase_ )
def _snake_case ( self : List[Any] ):
snake_case_ : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(lowercase_ ) , 1008 )
def _snake_case ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def _snake_case ( self : Optional[Any] ):
snake_case_ : str = XGLMTokenizer(lowercase_ , keep_accents=lowercase_ )
snake_case_ : int = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowercase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
snake_case_ : str = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
snake_case_ : Tuple = tokenizer.convert_tokens_to_ids(lowercase_ )
self.assertListEqual(
lowercase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
snake_case_ : str = tokenizer.convert_ids_to_tokens(lowercase_ )
self.assertListEqual(
lowercase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def _snake_case ( self : List[Any] ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def _snake_case ( self : Any ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowercase_ , f.name )
snake_case_ : List[Any] = XGLMTokenizer(f.name , keep_accents=lowercase_ )
snake_case_ : Any = pickle.dumps(lowercase_ )
pickle.loads(lowercase_ )
def _snake_case ( self : int ):
if not self.test_rust_tokenizer:
return
snake_case_ : Optional[int] = self.get_tokenizer()
snake_case_ : List[Any] = self.get_rust_tokenizer()
snake_case_ : List[str] = '''I was born in 92000, and this is falsé.'''
snake_case_ : Any = tokenizer.tokenize(lowercase_ )
snake_case_ : Any = rust_tokenizer.tokenize(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : Any = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
snake_case_ : List[Any] = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
snake_case_ : Optional[int] = self.get_rust_tokenizer()
snake_case_ : Any = tokenizer.encode(lowercase_ )
snake_case_ : Optional[Any] = rust_tokenizer.encode(lowercase_ )
self.assertListEqual(lowercase_ , lowercase_ )
@slow
def _snake_case ( self : Dict ):
snake_case_ : Optional[Any] = '''Hello World!'''
snake_case_ : List[str] = [2, 31227, 4447, 35]
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def _snake_case ( self : int ):
snake_case_ : Optional[int] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
snake_case_ : int = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowercase_ , self.big_tokenizer.encode(lowercase_ ) )
@slow
def _snake_case ( self : Tuple ):
# fmt: off
snake_case_ : int = {
'''input_ids''': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name='''facebook/xglm-564M''' , padding=lowercase_ , )
| 123 |
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
    merges_file = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=10_00,
tgt_vocab_size=10_00,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
batch = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
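# Illustrative follow-up (assumes the upload step above was actually performed):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#   tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
#   model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
#   out = model.generate(**tok(["Making tiny model"], return_tensors="pt"))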
| 123 | 1 |
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : List[str] , *_UpperCamelCase : List[str] , **_UpperCamelCase : List[Any]) ->Dict:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , *_UpperCamelCase : List[Any] , **_UpperCamelCase : Optional[Any]) ->List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Optional[int]) ->str:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : Dict , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : Any) ->Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Any , **_UpperCamelCase : str) ->List[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *_UpperCamelCase : Tuple , **_UpperCamelCase : int) ->Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : int , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Optional[Any]) ->int:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *_UpperCamelCase : List[str] , **_UpperCamelCase : Optional[int]) ->List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any , *_UpperCamelCase : str , **_UpperCamelCase : Any) ->List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : List[Any] , *_UpperCamelCase : Tuple , **_UpperCamelCase : Optional[int]) ->str:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict , *_UpperCamelCase : List[str] , **_UpperCamelCase : Dict) ->Optional[int]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : int , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : Optional[Any]) ->Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : Union[str, Any] , *_UpperCamelCase : Optional[Any] , **_UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : List[str]) ->str:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Optional[int] , **_UpperCamelCase : Dict) ->Tuple:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
class __snake_case ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ['torch', 'transformers', 'onnx']
def __init__( self : Any , *_UpperCamelCase : Any , **_UpperCamelCase : str) ->List[str]:
"""simple docstring"""
requires_backends(self , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *_UpperCamelCase : Tuple , **_UpperCamelCase : int) ->List[str]:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] , *_UpperCamelCase : Dict , **_UpperCamelCase : int) ->int:
"""simple docstring"""
requires_backends(cls , ["""torch""", """transformers""", """onnx"""])
| 718 | import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester ( unittest.TestCase ):
    '''simple docstring'''
    def test_get_aligned_output_features_output_indices( self ):
        """simple docstring"""
        stage_names = ["""a""", """b""", """c"""]
        # Defaults to last layer if both are None
        out_features , out_indices = get_aligned_output_features_output_indices(None , None , stage_names)
        self.assertEqual(out_features , ["""c"""])
        self.assertEqual(out_indices , [2])
        # Out indices set to match out features
        out_features , out_indices = get_aligned_output_features_output_indices(["""a""", """c"""] , None , stage_names)
        self.assertEqual(out_features , ["""a""", """c"""])
        self.assertEqual(out_indices , [0, 2])
        # Out features set to match out indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [0, 2] , stage_names)
        self.assertEqual(out_features , ["""a""", """c"""])
        self.assertEqual(out_indices , [0, 2])
        # Out features selected from negative indices
        out_features , out_indices = get_aligned_output_features_output_indices(None , [-3, -1] , stage_names)
        self.assertEqual(out_features , ["""a""", """c"""])
        self.assertEqual(out_indices , [-3, -1])
    def test_verify_out_features_out_indices( self ):
        """simple docstring"""
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None , 0 , ["""a""", """b"""])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None , (0, 1) , ["""a"""])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""])
        # Check passes with valid inputs
        verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""])
    def test_backbone_mixin( self ):
        """simple docstring"""
        backbone = BackboneMixin()
        backbone.stage_names = ["""a""", """b""", """c"""]
        backbone._out_features = ["""a""", """c"""]
        backbone._out_indices = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [0, 2])
        # Check out features and indices are updated correctly
        backbone.out_features = ["""a""", """b"""]
        self.assertEqual(backbone.out_features , ["""a""", """b"""])
        self.assertEqual(backbone.out_indices , [0, 1])
        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features , ["""a""", """c"""])
        self.assertEqual(backbone.out_indices , [-3, -1])
| 15 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig ( PretrainedConfig ):
    model_type = "yolos"
def __init__( self : Tuple , UpperCamelCase__ : Optional[int]=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Optional[Any]=1_2 , UpperCamelCase__ : List[Any]=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1e-12 , UpperCamelCase__ : int=[5_1_2, 8_6_4] , UpperCamelCase__ : str=1_6 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : int=True , UpperCamelCase__ : Union[str, Any]=1_0_0 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : int=1 , UpperCamelCase__ : str=5 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : str=5 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : int=0.1 , **UpperCamelCase__ : Optional[int] , )-> Any:
'''simple docstring'''
super().__init__(**lowercase__)
__lowerCAmelCase: Union[str, Any] = hidden_size
__lowerCAmelCase: Any = num_hidden_layers
__lowerCAmelCase: Tuple = num_attention_heads
__lowerCAmelCase: Dict = intermediate_size
__lowerCAmelCase: str = hidden_act
__lowerCAmelCase: str = hidden_dropout_prob
__lowerCAmelCase: List[Any] = attention_probs_dropout_prob
__lowerCAmelCase: int = initializer_range
__lowerCAmelCase: Tuple = layer_norm_eps
__lowerCAmelCase: Tuple = image_size
__lowerCAmelCase: int = patch_size
__lowerCAmelCase: int = num_channels
__lowerCAmelCase: List[str] = qkv_bias
__lowerCAmelCase: List[str] = num_detection_tokens
__lowerCAmelCase: List[Any] = use_mid_position_embeddings
__lowerCAmelCase: Optional[Any] = auxiliary_loss
# Hungarian matcher
__lowerCAmelCase: Any = class_cost
__lowerCAmelCase: Union[str, Any] = bbox_cost
__lowerCAmelCase: Optional[int] = giou_cost
# Loss coefficients
__lowerCAmelCase: Optional[int] = bbox_loss_coefficient
__lowerCAmelCase: Union[str, Any] = giou_loss_coefficient
__lowerCAmelCase: int = eos_coefficient
class YolosOnnxConfig ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
def lowercase_ ( self : int)-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def lowercase_ ( self : Dict)-> float:
'''simple docstring'''
return 1e-4
@property
def lowercase_ ( self : int)-> int:
'''simple docstring'''
return 1_2
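# Hedged usage note: with an OnnxConfig like the one above wired up for the model type,
# the (since-deprecated) transformers.onnx exporter could be driven from the CLI, e.g.:
#
#   python -m transformers.onnx --model=hustvl/yolos-small onnx/
#
# The exported graph takes the "pixel_values" input declared in `inputs`, and validation
# against the PyTorch outputs uses the 1e-4 absolute tolerance returned above.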
| 346 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :List[Any] = logging.get_logger(__name__)
_lowerCAmelCase :Tuple = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class FNetConfig ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "fnet"
def __init__( self , lowercase__=32_000 , lowercase__=768 , lowercase__=12 , lowercase__=3_072 , lowercase__="gelu_new" , lowercase__=0.1 , lowercase__=512 , lowercase__=4 , lowercase__=0.0_2 , lowercase__=1E-12 , lowercase__=False , lowercase__=512 , lowercase__=3 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Optional[int]:
super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : List[str] = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Dict = intermediate_size
SCREAMING_SNAKE_CASE : int = hidden_act
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Dict = use_tpu_fourier_optimizations
SCREAMING_SNAKE_CASE : str = tpu_short_seq_length
| 251 | 0 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
headers = {'UserAgent': UserAgent().random}
def extract_user_profile(script ):
    data = script.contents[0]
    info = json.loads(data[data.find("""{\"config\"""" ) : -1] )
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """simple docstring"""
    def __init__( self , username ):
        self.url = f'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json( self ):
        html = requests.get(self.url , headers=headers ).text
        scripts = BeautifulSoup(html , """html.parser""" ).find_all("""script""" )
        try:
            return extract_user_profile(scripts[4] )
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3] )
def __repr__( self ):
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return f'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username( self ):
        return self.user_data["username"]
    @property
    def fullname( self ):
        return self.user_data["full_name"]
    @property
    def biography( self ):
        return self.user_data["biography"]
    @property
    def email( self ):
        return self.user_data["business_email"]
    @property
    def website( self ):
        return self.user_data["external_url"]
    @property
    def number_of_followers( self ):
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings( self ):
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts( self ):
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url( self ):
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified( self ):
        return self.user_data["is_verified"]
    @property
    def is_private( self ):
        return self.user_data["is_private"]
def a (lowerCAmelCase__ = "github" ):
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
    instagram_user = InstagramUser(username )
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data , dict )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120_000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser('github')
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 209 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput ( BaseOutput ):
"""simple docstring"""
_lowerCamelCase = 42
class VQModel ( ModelMixin , ConfigMixin ):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 3 , __A = 3 , __A = ("DownEncoderBlock2D",) , __A = ("UpDecoderBlock2D",) , __A = (64,) , __A = 1 , __A = "silu" , __A = 3 , __A = 32 , __A = 256 , __A = 32 , __A = None , __A = 0.18215 , __A = "group" , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , )
__a = vq_embed_dim if vq_embed_dim is not None else latent_channels
        __a = nn.Conv2d(__A , __A , 1 )
        __a = VectorQuantizer(__A , __A , beta=0.25 , remap=__A , sane_index_shape=__A )
        __a = nn.Conv2d(__A , __A , 1 )
# pass init params to Decoder
__a = Decoder(
in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , )
@apply_forward_hook
def snake_case_ ( self , __A , __A = True ):
__a = self.encoder(__A )
__a = self.quant_conv(__A )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__A )
@apply_forward_hook
def snake_case_ ( self , __A , __A = False , __A = True ):
# also go through quantization layer
if not force_not_quantize:
__a , __a , __a = self.quantize(__A )
else:
__a = h
__a = self.post_quant_conv(__A )
__a = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
def snake_case_ ( self , __A , __A = True ):
__a = sample
__a = self.encode(__A ).latents
__a = self.decode(__A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
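# Minimal smoke test (illustrative; mirrors diffusers' VQModel, whose defaults match the
# signature above -- 3 input channels, a single 64-channel block, 3 latent channels):
#
#   import torch
#   from diffusers import VQModel
#
#   vq = VQModel()
#   x = torch.randn(1, 3, 32, 32)
#   rec = vq(x).sample   # encode -> quantize -> decode round trip
#   print(rec.shape)     # torch.Size([1, 3, 32, 32])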
| 209 | 1 |
'''simple docstring'''
def search( list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    """simple docstring"""
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
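# Quick demonstration (added; not in the original): the recursion narrows the window from
# both ends, so matches at either boundary return immediately.
assert search([1, 4, 7, 10, 15] , 15 ) == 4
assert search([1, 4, 7, 10, 15] , 7 ) == 2
assert search([1, 4, 7, 10, 15] , 6 ) == -1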
if __name__ == "__main__":
import doctest
doctest.testmod() | 433 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}
class GitVisionConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "git_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=12 , A_=12 , A_=3 , A_=224 , A_=16 , A_="quick_gelu" , A_=1e-5 , A_=0.0 , A_=0.0_2 , **A_ , ) -> Dict:
super().__init__(**A_ )
lowerCAmelCase = hidden_size
lowerCAmelCase = intermediate_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = num_channels
lowerCAmelCase = patch_size
lowerCAmelCase = image_size
lowerCAmelCase = initializer_range
lowerCAmelCase = attention_dropout
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = hidden_act
@classmethod
def __snake_case ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
        config_dict, kwargs = cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("""model_type""" ) == "git":
lowerCAmelCase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "git"
def __init__( self , A_=None , A_=3_0522 , A_=768 , A_=6 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=1024 , A_=0.0_2 , A_=1e-12 , A_=0 , A_="absolute" , A_=True , A_=False , A_=101 , A_=102 , A_=None , **A_ , ) -> Tuple:
super().__init__(bos_token_id=A_ , eos_token_id=A_ , pad_token_id=A_ , **A_ )
if vision_config is None:
lowerCAmelCase = {}
logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""" )
lowerCAmelCase = GitVisionConfig(**A_ )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = position_embedding_type
lowerCAmelCase = use_cache
lowerCAmelCase = tie_word_embeddings
lowerCAmelCase = num_image_with_embedding
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase = copy.deepcopy(self.__dict__ )
lowerCAmelCase = self.vision_config.to_dict()
lowerCAmelCase = self.__class__.model_type
return output | 433 | 1 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin :
    """simple docstring"""
    feature_extraction_class = None
    def test_feat_extract_to_json_string( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        obj = json.loads(feat_extract.to_json_string() )
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key] , value )
    def test_feat_extract_to_json_file( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname , '''feat_extract.json''' )
            feat_extract_first.to_json_file(json_file_path )
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_feat_extract_from_and_save_pretrained( self ):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
            check_json_file_has_correct_format(saved_file )
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
        self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
    def test_init_without_params( self ):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract )
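# Hypothetical concrete subclass (names illustrative) showing how the mixin is consumed:
#
#   import unittest
#
#   class MyFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#       feature_extraction_class = MyFeatureExtractor
#       feat_extract_dict = {"feature_size": 1, "sampling_rate": 16_000}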
| 712 |
from math import factorial
def solution( n: int = 20 ) -> int:
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n ) / (factorial(k ) * factorial(n - k )) )
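# Cross-check (added for illustration): the formula above is the central binomial
# coefficient C(2n, n), which math.comb computes exactly in integer arithmetic.
from math import comb
assert solution(20 ) == comb(40 , 20 ) == 137_846_528_820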
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 597 | 0 |
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(f"""git diff --diff-filter=d --name-only {fork_point_sha}""".split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(Rf"""^({joined_dirs}).*?\.py$""")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
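# Example invocation (hypothetical repo state), matching the Makefile usage noted above:
#
#   $ python ./utils/get_modified_files.py utils src tests examples
#   src/transformers/modeling_utils.py tests/test_modeling_common.py
#
# Only still-existing, git-tracked .py files under the listed top-level dirs are printed,
# space-separated and without a trailing newline.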
| 187 |
from jiwer import compute_measures
import datasets
A = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
A = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
A = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER ( datasets.Metric ):
"""simple docstring"""
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] , )
    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        """simple docstring"""
        if concatenate_texts:
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
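# Standalone illustration of the aggregation above (jiwer's compute_measures returns these
# counts directly; the numbers are what the docstring example yields):
#
#   from jiwer import compute_measures
#
#   m = compute_measures("this is the reference", "this is the prediction")
#   print(m["wer"], m["substitutions"], m["deletions"], m["insertions"], m["hits"])
#   # 0.25 1 0 0 3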
| 187 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """simple docstring"""
    def __init__( self , value: int | None = None ):
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None
    def __repr__( self ) -> str:
        from pprint import pformat
        if self.left is None and self.right is None:
            return F'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )
    def __str__( self ) -> str:
        value = str(self.value ) + ' '
        left = str(self.left or '' )
        right = str(self.right or '' )
        return value + left + right
def split(root: Node | None , value: int ) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right
def merge(left: Node | None , right: Node | None ) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert(root: Node | None , value: int ) -> Node | None:
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
def erase(root: Node | None , value: int ) -> Node | None:
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def inorder(root: Node | None ) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=',' )
        inorder(root.right )
def interact_treap(root: Node | None , args: str ) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('Unknown command' )
    return root
def main() -> None:
    root = None
    print(
        'enter numbers to create a tree, + value to add value into treap, '
        '- value to erase all nodes with value. \'q\' to quit. ' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('goodbye!' )
import doctest
doctest.testmod()
main()
| 327 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline ( DiffusionPipeline ):
    '''simple docstring'''
    _optional_components = ["""vqvae"""]
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ) -> Optional[Any]:
super().__init__()
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , mel=lowerCAmelCase__ , vqvae=lowerCAmelCase__ )
    def get_default_steps( self ) -> int:
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1_000
@torch.no_grad()
def __call__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
SCREAMING_SNAKE_CASE = steps or self.get_default_steps()
self.scheduler.set_timesteps(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
SCREAMING_SNAKE_CASE = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
SCREAMING_SNAKE_CASE = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=lowerCAmelCase__ , device=self.device , )
SCREAMING_SNAKE_CASE = noise
SCREAMING_SNAKE_CASE = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.mel.audio_slice_to_image(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
SCREAMING_SNAKE_CASE = (input_image / 255) * 2 - 1
SCREAMING_SNAKE_CASE = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
SCREAMING_SNAKE_CASE = self.vqvae.encode(torch.unsqueeze(lowerCAmelCase__ , 0 ) ).latent_dist.sample(
generator=lowerCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , self.scheduler.timesteps[start_step - 1] )
SCREAMING_SNAKE_CASE = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
SCREAMING_SNAKE_CASE = int(mask_start_secs * pixels_per_second )
SCREAMING_SNAKE_CASE = int(mask_end_secs * pixels_per_second )
SCREAMING_SNAKE_CASE = self.scheduler.add_noise(lowerCAmelCase__ , lowerCAmelCase__ , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )['sample']
else:
SCREAMING_SNAKE_CASE = self.unet(lowerCAmelCase__ , lowerCAmelCase__ )['sample']
if isinstance(self.scheduler , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , )['prev_sample']
else:
SCREAMING_SNAKE_CASE = self.scheduler.step(
model_output=lowerCAmelCase__ , timestep=lowerCAmelCase__ , sample=lowerCAmelCase__ , generator=lowerCAmelCase__ , )['prev_sample']
if mask is not None:
if mask_start > 0:
SCREAMING_SNAKE_CASE = mask[:, step, :, :mask_start]
if mask_end > 0:
SCREAMING_SNAKE_CASE = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
SCREAMING_SNAKE_CASE = 1 / self.vqvae.config.scaling_factor * images
SCREAMING_SNAKE_CASE = self.vqvae.decode(lowerCAmelCase__ )['sample']
SCREAMING_SNAKE_CASE = (images / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
SCREAMING_SNAKE_CASE = (images * 255).round().astype('uint8' )
SCREAMING_SNAKE_CASE = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(lowerCAmelCase__ , mode='RGB' ).convert('L' ) for _ in images) )
SCREAMING_SNAKE_CASE = [self.mel.image_to_audio(lowerCAmelCase__ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowerCAmelCase__ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowerCAmelCase__ ) )
@torch.no_grad()
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = 50 ) -> np.ndarray:
assert isinstance(self.scheduler , lowerCAmelCase__ )
self.scheduler.set_timesteps(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
SCREAMING_SNAKE_CASE = (sample / 255) * 2 - 1
SCREAMING_SNAKE_CASE = torch.Tensor(lowerCAmelCase__ ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
SCREAMING_SNAKE_CASE = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
SCREAMING_SNAKE_CASE = self.scheduler.alphas_cumprod[t]
SCREAMING_SNAKE_CASE = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE = 1 - alpha_prod_t
SCREAMING_SNAKE_CASE = self.unet(lowerCAmelCase__ , lowerCAmelCase__ )['sample']
SCREAMING_SNAKE_CASE = (1 - alpha_prod_t_prev) ** 0.5 * model_output
SCREAMING_SNAKE_CASE = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
SCREAMING_SNAKE_CASE = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def slerp( x0 , x1 , alpha ) -> torch.Tensor:
        theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
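# slerp is the usual way to interpolate between two Gaussian latents while staying on
# (roughly) the same noise shell; a hedged usage sketch:
#
#   import torch
#   a, b = torch.randn(1, 1, 64, 64), torch.randn(1, 1, 64, 64)
#   mid = AudioDiffusionPipeline.slerp(a, b, 0.5)   # halfway along the great circle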
| 327 | 1 |
'''simple docstring'''
import os
def largest_product( grid ) -> int:
    """simple docstring"""
    n_columns = len(grid[0] )
    n_rows = len(grid )
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns ):
        for j in range(n_rows - 3 ):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product , horz_product , lr_diag_product , rl_diag_product )
            if max_product > largest:
                largest = max_product
    return largest
def solution() -> int:
    """simple docstring"""
    grid = []
    with open(os.path.dirname(__file__) + '''/grid.txt''' ) as file:
        for line in file:
            grid.append(line.strip('''\n''' ).split(''' ''' ) )
    grid = [[int(i ) for i in grid[j]] for j in range(len(grid ) )]
    return largest_product(grid )
if __name__ == "__main__":
print(solution())
| 94 |
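As a quick sanity check for the grid scan above, the 4x4 grid below is made up for illustration and is not part of the original file:

demo_grid = [
    [1, 2, 3, 4],
    [5, 6, 7, 8],
    [9, 10, 11, 12],
    [13, 14, 15, 16],
]
# The bottom row's horizontal product dominates here: 13 * 14 * 15 * 16 = 43680.
assert largest_product(demo_grid) == 43680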
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two( x ):
    """simple docstring"""
    return x + 2
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
        code = '''x = 3'''
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3} )

        code = '''x = y'''
        state = {'''y''': 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 5, '''y''': 5} )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
        code = '''y = add_two(x)'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''y''': 5} )

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
        code = '''x = 3'''
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3} )
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        self.assertDictEqual(result , {'''x''': 3, '''y''': 5} )
        self.assertDictEqual(state , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
        code = '''x = 3\ny = 5'''
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''y''': 5} )
def A__ ( self : Any ) -> Tuple:
'''simple docstring'''
        code = '''text = f\'This is x: {x}.\''''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {'''x''': 3, '''text''': '''This is x: 3.'''} )
def A__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
        code = '''if x <= 3:\n y = 2\nelse:\n y = 5'''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {'''x''': 3, '''y''': 2} )

        state = {'''x''': 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {'''x''': 8, '''y''': 5} )
def A__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
        code = '''test_list = [x, add_two(x)]'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {'''x''': 3, '''test_list''': [3, 5]} )
def A__ ( self : Any ) -> Tuple:
'''simple docstring'''
        code = '''y = x'''
        state = {'''x''': 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {'''x''': 3, '''y''': 3} )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
        code = '''test_list = [x, add_two(x)]\ntest_list[1]'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''test_list''': [3, 5]} )

        code = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'''
        state = {'''x''': 3}
        result = evaluate(code , {'''add_two''': add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}} )
def A__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
        code = '''x = 0\nfor i in range(3):\n x = i'''
        state = {}
        result = evaluate(code , {'''range''': range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {'''x''': 2, '''i''': 2} )
| 94 | 1 |
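The tests above exercise a restricted Python interpreter. A minimal sketch of the core idea (walk the AST, execute only whitelisted node types, and thread results through a `state` dict) might look like this; it is illustrative only, and the real `evaluate` in transformers supports many more constructs and richer error reporting:

import ast

def tiny_evaluate(code: str, tools: dict, state: dict):
    # Returns the value of the last statement, mutating `state` on assignments.
    result = None
    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):
            value = _eval_node(node.value, tools, state)
            for target in node.targets:
                state[target.id] = value
            result = value
        elif isinstance(node, ast.Expr):
            result = _eval_node(node.value, tools, state)
        else:
            raise ValueError(f"Unsupported statement: {type(node).__name__}")
    return result

def _eval_node(node, tools, state):
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Name):
        return state[node.id] if node.id in state else tools[node.id]
    if isinstance(node, ast.Call):
        func = _eval_node(node.func, tools, state)
        args = [_eval_node(arg, tools, state) for arg in node.args]
        return func(*args)
    raise ValueError(f"Unsupported expression: {type(node).__name__}")

assert tiny_evaluate("y = add_two(x)", {"add_two": lambda x: x + 2}, {"x": 3}) == 5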
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes,rng=random.Random(0 ) ).to(torch_device )
        return image
@property
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
        model = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
@property
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
@property
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config )
@property
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
def extract(*_A : Any,**_A : List[Any] ):
class a__ :
def __init__( self : List[Any] ):
"""simple docstring"""
                        self.pixel_values = torch.ones([0] )
def __UpperCamelCase ( self : Union[str, Any],_A : Optional[int] ):
"""simple docstring"""
                        self.pixel_values.to(_A )
return self
return Out()
return extract
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Dict = self.dummy_cond_unet
        SCREAMING_SNAKE_CASE_ : List[Any] = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=lowerCAmelCase__,
            set_alpha_to_one=lowerCAmelCase__,
        )
SCREAMING_SNAKE_CASE_ : Any = self.dummy_vae
SCREAMING_SNAKE_CASE_ : Any = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
        SCREAMING_SNAKE_CASE_ : Dict = StableDiffusionPipeline(
            unet=lowerCAmelCase__,
            scheduler=lowerCAmelCase__,
            vae=lowerCAmelCase__,
            text_encoder=lowerCAmelCase__,
            tokenizer=lowerCAmelCase__,
            safety_checker=lowerCAmelCase__,
            feature_extractor=self.dummy_extractor,
        )
SCREAMING_SNAKE_CASE_ : List[str] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE_ : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[Any] = sd_pipe([prompt],generator=lowerCAmelCase__,guidance_scale=6.0,num_inference_steps=2,output_type="np" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : List[str] = sd_pipe(
[prompt],generator=lowerCAmelCase__,guidance_scale=6.0,num_inference_steps=2,output_type="np",return_dict=lowerCAmelCase__,)[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : int = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ : Any = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ : List[str] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_vae
SCREAMING_SNAKE_CASE_ : str = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
        SCREAMING_SNAKE_CASE_ : Dict = StableDiffusionPipeline(
            unet=lowerCAmelCase__,
            scheduler=lowerCAmelCase__,
            vae=lowerCAmelCase__,
            text_encoder=lowerCAmelCase__,
            tokenizer=lowerCAmelCase__,
            safety_checker=lowerCAmelCase__,
            feature_extractor=self.dummy_extractor,
        )
SCREAMING_SNAKE_CASE_ : int = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Dict = sd_pipe([prompt],generator=lowerCAmelCase__,guidance_scale=6.0,num_inference_steps=2,output_type="np" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = output.images
SCREAMING_SNAKE_CASE_ : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe(
[prompt],generator=lowerCAmelCase__,guidance_scale=6.0,num_inference_steps=2,output_type="np",return_dict=lowerCAmelCase__,)[0]
SCREAMING_SNAKE_CASE_ : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe",safety_checker=lowerCAmelCase__ )
assert isinstance(lowerCAmelCase__,lowerCAmelCase__ )
assert isinstance(pipe.scheduler,lowerCAmelCase__ )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pipe("example prompt",num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE_ : int = pipe("example prompt",num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda","This test requires a GPU" )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ : List[str] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = self.dummy_vae
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
SCREAMING_SNAKE_CASE_ : Any = unet.half()
SCREAMING_SNAKE_CASE_ : str = vae.half()
SCREAMING_SNAKE_CASE_ : int = bert.half()
# make sure here that pndm scheduler skips prk
        SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionPipeline(
            unet=lowerCAmelCase__,
            scheduler=lowerCAmelCase__,
            vae=lowerCAmelCase__,
            text_encoder=lowerCAmelCase__,
            tokenizer=lowerCAmelCase__,
            safety_checker=lowerCAmelCase__,
            feature_extractor=self.dummy_extractor,
        )
SCREAMING_SNAKE_CASE_ : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = "A painting of a squirrel eating a burger"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe([prompt],num_inference_steps=2,output_type="np" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class a__ ( unittest.TestCase ):
def __UpperCamelCase ( self : int ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",safety_checker=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
SCREAMING_SNAKE_CASE_ : Dict = 40_0366_0346
SCREAMING_SNAKE_CASE_ : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE_ : Any = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = sd_pipe(
[prompt],generator=lowerCAmelCase__,guidance_scale=lowerCAmelCase__,num_inference_steps=50,output_type="np",width=512,height=512,sld_guidance_scale=0,)
SCREAMING_SNAKE_CASE_ : List[Any] = output.images
SCREAMING_SNAKE_CASE_ : Tuple = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Any = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = sd_pipe(
[prompt],generator=lowerCAmelCase__,guidance_scale=lowerCAmelCase__,num_inference_steps=50,output_type="np",width=512,height=512,sld_guidance_scale=2000,sld_warmup_steps=7,sld_threshold=0.025,sld_momentum_scale=0.5,sld_mom_beta=0.7,)
SCREAMING_SNAKE_CASE_ : List[str] = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Tuple = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5",safety_checker=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE_ : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = "padme amidala taking a bath artwork, safe for work, no nudity"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 27_3497_1755
SCREAMING_SNAKE_CASE_ : Optional[int] = 7
SCREAMING_SNAKE_CASE_ : List[Any] = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = sd_pipe(
[prompt],generator=lowerCAmelCase__,guidance_scale=lowerCAmelCase__,num_inference_steps=50,output_type="np",width=512,height=512,sld_guidance_scale=0,)
SCREAMING_SNAKE_CASE_ : List[Any] = output.images
SCREAMING_SNAKE_CASE_ : Optional[int] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : int = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = sd_pipe(
[prompt],generator=lowerCAmelCase__,guidance_scale=lowerCAmelCase__,num_inference_steps=50,output_type="np",width=512,height=512,sld_guidance_scale=2000,sld_warmup_steps=7,sld_threshold=0.025,sld_momentum_scale=0.5,sld_mom_beta=0.7,)
SCREAMING_SNAKE_CASE_ : Dict = output.images
SCREAMING_SNAKE_CASE_ : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Tuple = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
SCREAMING_SNAKE_CASE_ : Tuple = sd_pipe.to(lowerCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
SCREAMING_SNAKE_CASE_ : List[str] = 10_4435_5234
SCREAMING_SNAKE_CASE_ : str = 12
SCREAMING_SNAKE_CASE_ : str = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = sd_pipe(
[prompt],generator=lowerCAmelCase__,guidance_scale=lowerCAmelCase__,num_inference_steps=50,output_type="np",width=512,height=512,sld_guidance_scale=0,)
SCREAMING_SNAKE_CASE_ : str = output.images
SCREAMING_SNAKE_CASE_ : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
SCREAMING_SNAKE_CASE_ : int = torch.manual_seed(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = sd_pipe(
[prompt],generator=lowerCAmelCase__,guidance_scale=lowerCAmelCase__,num_inference_steps=50,output_type="np",width=512,height=512,sld_guidance_scale=2000,sld_warmup_steps=7,sld_threshold=0.025,sld_momentum_scale=0.5,sld_mom_beta=0.7,)
SCREAMING_SNAKE_CASE_ : List[Any] = output.images
SCREAMING_SNAKE_CASE_ : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
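All of the safety-checker tests above follow one regression pattern: render an image, take a small pixel slice, and compare it against stored values. A generic helper capturing that pattern, as a sketch; the 1e-2 tolerance and bottom-right corner slice mirror the assertions above:

import numpy as np

def assert_image_slice(image: np.ndarray, expected_slice, atol: float = 1e-2) -> None:
    # `image` is (batch, height, width, channels); compare the last channel's
    # bottom-right 3x3 corner of the first image against the stored slice.
    image_slice = image[0, -3:, -3:, -1].flatten()
    assert np.abs(image_slice - np.asarray(expected_slice)).max() < atol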
| 701 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class a__ ( A__ ):
A = 'naver-clova-ix/donut-base-finetuned-docvqa'
A = (
        'This is a tool that answers a question about a document (pdf). It takes an input named `document` which '
'should be the document containing the information, as well as a `question` that is the question about the '
'document. It returns a text that contains the answer to the question.'
)
A = 'document_qa'
A = AutoProcessor
A = VisionEncoderDecoderModel
A = ['image', 'text']
A = ['text']
    def __init__( self,*args,**kwargs ):
        """simple docstring"""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool." )

        super().__init__(*args,**kwargs )
    def encode( self,document : "Image",question : str ):
        """simple docstring"""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}",question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt,add_special_tokens=False,return_tensors="pt" ).input_ids
        pixel_values = self.pre_processor(document,return_tensors="pt" ).pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward( self,inputs ):
        """simple docstring"""
        return self.model.generate(
            inputs["pixel_values"].to(self.device ),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device ),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences
    def decode( self,outputs ):
        """simple docstring"""
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token,"" )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token,"" )
        sequence = re.sub(R"<.*?>","",sequence,count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
| 316 | 0 |
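A hedged usage sketch for the tool above; the instantiation, image file, and question are illustrative, with only the error message in `__init__` confirming the `DocumentQuestionAnsweringTool` name:

from PIL import Image

tool = DocumentQuestionAnsweringTool()          # loads the Donut DocVQA checkpoint on first use
document = Image.open("invoice.png")            # hypothetical input document
print(tool(document, "What is the total amount?"))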
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase_ = logging.getLogger(__name__)
def dummy_dataloaders( a=2 , b=3 , batch_size=16 , n_train_batches: int = 10 , n_valid_batches: int = 2 ):
    """simple docstring"""

    def get_dataset(n_batches ):
        x = torch.randn(batch_size * n_batches , 1 )
        return TensorDataset(x , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )

    train_dataset = get_dataset(n_train_batches )
    valid_dataset = get_dataset(n_valid_batches )
    train_dataloader = DataLoader(train_dataset , shuffle=True , batch_size=batch_size , num_workers=4 )
    valid_dataloader = DataLoader(valid_dataset , shuffle=False , batch_size=batch_size , num_workers=4 )
    return (train_dataloader, valid_dataloader)
def train( num_epochs , model , dataloader , optimizer , accelerator , scheduler=None ):
    """simple docstring"""
    rands = []
    for epoch in range(num_epochs ):
        # Train quickly
        model.train()
        for batch in dataloader:
            x , y = batch
            outputs = model(x )
            loss = torch.nn.functional.mse_loss(outputs , y )
            accelerator.backward(loss )
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random() )  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class __UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE : Optional[int] = nn.Parameter(torch.randn(1 ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.randn(1 ) )
def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[str] ):
"""simple docstring"""
return x * self.a + self.b
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[Any] = DummyModel()
__SCREAMING_SNAKE_CASE : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dummy_dataloaders()
__SCREAMING_SNAKE_CASE : Optional[Any] = ProjectConfiguration(total_limit=1 , project_dir=_A , automatic_checkpoint_naming=_A )
# Train baseline
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator(project_config=_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = DummyModel()
__SCREAMING_SNAKE_CASE : int = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dummy_dataloaders()
# Train baseline
__SCREAMING_SNAKE_CASE : Optional[int] = Accelerator()
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(_A , '''initial''' )
accelerator.save_state(_A )
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Any = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE : Dict = optimizer.state_dict()
__SCREAMING_SNAKE_CASE : Tuple = train(3 , _A , _A , _A , _A )
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Union[str, Any] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[int] = DummyModel()
__SCREAMING_SNAKE_CASE : Tuple = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dummy_dataloaders()
__SCREAMING_SNAKE_CASE : List[str] = Accelerator()
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(_A )
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Any = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE : Dict = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Tuple = train(2 , _A , _A , _A , _A )
# Save everything
__SCREAMING_SNAKE_CASE : Dict = os.path.join(_A , '''checkpoint''' )
accelerator.save_state(_A )
# Load everything back in and make sure all states work
accelerator.load_state(_A )
test_rands += train(1 , _A , _A , _A , _A )
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Tuple = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE : str = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE : List[str] = DummyModel()
__SCREAMING_SNAKE_CASE : List[str] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dummy_dataloaders()
__SCREAMING_SNAKE_CASE : Tuple = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
__SCREAMING_SNAKE_CASE : Any = Accelerator(project_dir=_A , project_config=_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = accelerator.prepare(
_A , _A , _A , _A )
# Save initial
accelerator.save_state()
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[int] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE : Tuple = optimizer.state_dict()
__SCREAMING_SNAKE_CASE : Any = train(3 , _A , _A , _A , _A )
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Union[str, Any] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[int] = DummyModel()
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = dummy_dataloaders()
__SCREAMING_SNAKE_CASE : Optional[int] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=_A )
__SCREAMING_SNAKE_CASE : List[str] = Accelerator(project_dir=_A , project_config=_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = accelerator.prepare(
_A , _A , _A , _A )
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) )
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[Any] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE : Optional[int] = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Optional[int] = train(2 , _A , _A , _A , _A )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_1''' ) )
test_rands += train(1 , _A , _A , _A , _A )
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Optional[int] = model.a.item(), model.b.item()
__SCREAMING_SNAKE_CASE : int = optimizer.state_dict()
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
self.assertEqual(_A , _A )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 2, 3] )
__SCREAMING_SNAKE_CASE : str = torch.tensor([2, 3, 4] )
__SCREAMING_SNAKE_CASE : List[str] = DummyModel()
__SCREAMING_SNAKE_CASE : int = torch.optim.Adam(net.parameters() )
__SCREAMING_SNAKE_CASE : int = Accelerator()
with self.assertRaises(_A ) as ve:
accelerator.register_for_checkpointing(_A , _A , _A , _A )
__SCREAMING_SNAKE_CASE : List[str] = str(ve.exception )
self.assertTrue('''Item at index 0''' in message )
self.assertTrue('''Item at index 1''' in message )
self.assertFalse('''Item at index 2''' in message )
self.assertFalse('''Item at index 3''' in message )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE : List[str] = DummyModel()
__SCREAMING_SNAKE_CASE : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
__SCREAMING_SNAKE_CASE : Tuple = torch.optim.lr_scheduler.StepLR(_A , step_size=1 , gamma=0.99 )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dummy_dataloaders()
__SCREAMING_SNAKE_CASE : int = ProjectConfiguration(automatic_checkpoint_naming=_A )
# Train baseline
__SCREAMING_SNAKE_CASE : List[Any] = Accelerator(project_dir=_A , project_config=_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(
_A , _A , _A , _A , _A )
# Save initial
accelerator.save_state()
__SCREAMING_SNAKE_CASE : Optional[Any] = scheduler.state_dict()
train(3 , _A , _A , _A , _A , _A )
self.assertNotEqual(_A , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) )
self.assertEqual(_A , scheduler.state_dict() )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__SCREAMING_SNAKE_CASE : Optional[int] = DummyModel()
__SCREAMING_SNAKE_CASE : List[str] = ProjectConfiguration(automatic_checkpoint_naming=_A , total_limit=2 )
# Train baseline
__SCREAMING_SNAKE_CASE : Optional[Any] = Accelerator(project_dir=_A , project_config=_A )
__SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare(_A )
            # Save 11 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_9''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_A , '''checkpoints''' , '''checkpoint_10''' ) ) )
@require_cuda
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(_A , env=os.environ.copy() )
if __name__ == "__main__":
lowercase_ = """/tmp/accelerate/state_checkpointing"""
lowercase_ = DummyModel()
lowercase_ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
lowercase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase_ , lowercase_ = dummy_dataloaders()
lowercase_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase_ , lowercase_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase_ = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
lowercase_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
lowercase_ = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
lowercase_ = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
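The round trip these tests verify reduces to a short pattern; in this sketch the checkpoint directory is illustrative and `DummyModel` is the toy model defined above:

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = DummyModel()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("/tmp/accelerate_ckpt")   # snapshots model, optimizer and RNG states
# ... train for a while ...
accelerator.load_state("/tmp/accelerate_ckpt")   # restores every prepared/registered object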
| 74 | import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Optional[Any]= self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , '''do_resize_and_center_crop''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''size''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''crop_pct''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(lowerCAmelCase , '''image_std''' ) )
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: Any= self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def UpperCamelCase_ ( self ) -> Tuple:
pass
def UpperCamelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Optional[int]= self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Dict= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> Dict:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: Dict= self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__: List[Any]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Union[str, Any]= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase_ ( self ) -> int:
# Initialize image_processing
SCREAMING_SNAKE_CASE__: List[Any]= self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__: Any= prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__: Optional[int]= image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__: Any= image_processing(lowerCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 64 | 0 |
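The distinguishing knob in this processor is `crop_pct`: before center-cropping to `crop_size`, the shortest edge is resized to roughly `crop_size / crop_pct`. A sketch of that size computation, illustrative only, since the real processor also handles dict sizes and aspect ratios:

def shortest_edge_before_crop(crop_size: int, crop_pct: float) -> int:
    # e.g. a 30-pixel crop at crop_pct=0.9 resizes the shortest edge to 33 first
    return int(crop_size / crop_pct)

assert shortest_edge_before_crop(30, 0.9) == 33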
from math import factorial
def combinations(n , k ):
    '''simple docstring'''
    if n < k or k < 0:
        raise ValueError('''Please enter positive integers for n and k where n >= k''' )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
F'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
F'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
| 711 |
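Since Python 3.8 the same value is available from the standard library without multiplying out three full factorials; a quick equivalence check, for illustration:

from math import comb

assert combinations(52, 5) == comb(52, 5) == 2598960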
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
_UpperCAmelCase : List[Any] = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
    def analyze_directory( self , directory : Path , identifier : Union[str, None] = None , n_identifier : Union[List[str], None] = None , ignore_files : Union[str, List[str], None] = None , only_modules : bool = True , ):
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('''__init__.py''' )
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('''Testing''' , file )

            if only_modules:
                module_identifier = file.split('''.''' )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.' )
            else:
                result = doctest.testfile(str(Path('''..''' ) / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def __snake_case ( self : Optional[Any] ):
        directory = Path('''src/transformers''' )
        identifier = '''modeling'''
        ignore_files = [
            '''modeling_ctrl.py''',
            '''modeling_tf_ctrl.py''',
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )

    def __snake_case ( self : Dict ):
        directory = Path('''src/transformers''' )
        identifier = '''tokenization'''
        self.analyze_directory(directory , identifier=identifier )

    def __snake_case ( self : Tuple ):
        directory = Path('''src/transformers''' )
        identifier = '''configuration'''
        self.analyze_directory(directory , identifier=identifier )

    def __snake_case ( self : Optional[Any] ):
        directory = Path('''src/transformers''' )
        n_identifiers = ['''configuration''', '''modeling''', '''tokenization''']
        self.analyze_directory(directory , n_identifier=n_identifiers )

    def __snake_case ( self : Tuple ):
        directory = Path('''docs/source''' )
        ignore_files = ['''favicon.ico''']
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 288 | 0 |
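Stripped to its essentials, the non-module branch of `analyze_directory` just runs the doctests embedded in a single file; a minimal sketch, with a hypothetical file path:

import doctest

result = doctest.testfile("example_module.py", optionflags=doctest.ELLIPSIS)
assert result.failed == 0   # doctest.testfile returns TestResults(failed, attempted)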
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : UNetaDModel
UpperCAmelCase : ScoreSdeVeScheduler
    def __init__( self , unet : UNetaDModel , scheduler : ScoreSdeVeScheduler ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 2000 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ):
        '''simple docstring'''
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape , generator=generator ) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device )

        self.scheduler.set_timesteps(num_inference_steps )
        self.scheduler.set_sigmas(num_inference_steps )

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )

            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                model_output = self.unet(sample , sigma_t ).sample
                sample = self.scheduler.step_correct(model_output , sample , generator=generator ).prev_sample

            # prediction step
            model_output = model(sample , sigma_t ).sample
            output = self.scheduler.step_pred(model_output , t , sample , generator=generator )

            sample , sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0 , 1 )
        sample = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample )

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample ) | 352 |
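Stripped of pipeline plumbing, the loop above is the classic predictor-corrector sampler from score-based SDE models: a few Langevin correction steps at each noise level, then one reverse-SDE prediction step. A condensed sketch, assuming `unet`, `scheduler` and an initial `sample` tensor already exist:

for i, t in enumerate(scheduler.timesteps):
    sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0], device=sample.device)
    for _ in range(scheduler.config.correct_steps):          # corrector: Langevin MCMC
        score = unet(sample, sigma_t).sample
        sample = scheduler.step_correct(score, sample).prev_sample
    score = unet(sample, sigma_t).sample                     # predictor: reverse SDE step
    sample = scheduler.step_pred(score, t, sample).prev_sample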
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
class lowercase ( SCREAMING_SNAKE_CASE_):
'''simple docstring'''
UpperCAmelCase : List[str] = ['pixel_values']
def __init__( self : List[Any] , snake_case : bool = True , snake_case : Optional[Dict[str, int]] = None , snake_case : PILImageResampling = PILImageResampling.BICUBIC , snake_case : bool = True , snake_case : bool = True , snake_case : Union[int, float] = 1 / 255 , snake_case : Dict[str, int] = None , snake_case : bool = True , snake_case : Optional[Union[float, List[float]]] = None , snake_case : Optional[Union[float, List[float]]] = None , **snake_case : Optional[int] , ):
'''simple docstring'''
super().__init__(**snake_case )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(snake_case )
SCREAMING_SNAKE_CASE : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE : int = get_size_dict(snake_case , default_to_square=snake_case , param_name='crop_size' )
SCREAMING_SNAKE_CASE : Optional[Any] = do_resize
SCREAMING_SNAKE_CASE : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE : Dict = do_center_crop
SCREAMING_SNAKE_CASE : Union[str, Any] = crop_size
SCREAMING_SNAKE_CASE : str = size
SCREAMING_SNAKE_CASE : List[str] = resample
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCamelCase_ ( self : int , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : PILImageResampling = PILImageResampling.BILINEAR , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : Any , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(snake_case )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : Tuple = get_resize_output_image_size(snake_case , size=size['shortest_edge'] , default_to_square=snake_case )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE : int = (size['height'], size['width'])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def lowerCamelCase_ ( self : List[str] , snake_case : np.ndarray , snake_case : Dict[str, int] , snake_case : Optional[Union[str, ChannelDimension]] = None , **snake_case : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : Optional[bool] = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : Optional[bool] = None , crop_size : Dict[str, int] = None , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_normalize : Optional[bool] = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' , default_to_square=True )
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        if not is_batched(images ):
            images = [images]
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
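
# Usage sketch for the preprocess method above. The class name is not visible
# in this excerpt, so `image_processor` below is a placeholder instance of
# whatever image-processor class these methods belong to; the call pattern
# follows the standard Hugging Face image-processor API.
#
#     >>> from PIL import Image  # doctest: +SKIP
#     >>> image = Image.open('path/to/image.jpg')  # doctest: +SKIP
#     >>> batch = image_processor.preprocess(image , return_tensors='pt' )  # doctest: +SKIP
#     >>> batch['pixel_values'].shape  # doctest: +SKIP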
"""simple docstring"""
LETTERS = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def main():
    '''simple docstring'''
    message = input("Enter message: " )
    key = input("Enter key [alphanumeric]: " )
    mode = input("Encrypt/Decrypt [e/d]: " )
    if mode.lower().startswith("e" ):
        mode = "encrypt"
        translated = encrypt_message(key , message )
    elif mode.lower().startswith("d" ):
        mode = "decrypt"
        translated = decrypt_message(key , message )
    print(f'\n{mode.title()}ed message:' )
    print(translated )
def encrypt_message(key , message ):
    '''simple docstring'''
    return translate_message(key , message , "encrypt" )
def decrypt_message(key , message ):
    '''simple docstring'''
    return translate_message(key , message , "decrypt" )
def translate_message(key , message , mode ):
    '''simple docstring'''
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper() )
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index] )
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
            if symbol.isupper():
                translated.append(LETTERS[num] )
            elif symbol.islower():
                translated.append(LETTERS[num].lower() )
            key_index += 1
            if key_index == len(key ):
                key_index = 0
        else:
            translated.append(symbol )
    return "".join(translated )
if __name__ == "__main__":
main()
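
# Worked example (no interactive input needed): with the classic Vigenère test
# vector, key "LEMON" applied to "ATTACKATDAWN" over the A-Z tableau gives:
#
#     >>> encrypt_message('LEMON' , 'ATTACKATDAWN' )
#     'LXFOPVEFRNHR'
#     >>> decrypt_message('LEMON' , 'LXFOPVEFRNHR' )
#     'ATTACKATDAWN'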
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 637_8137.0
AXIS_B = 635_6752.31_4245
EQUATORIAL_RADIUS = 637_8137
def lamberts_ellipsoidal_distance(lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    '''simple docstring'''
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    b_lat2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1 , lon1 , lat2 , lon2 ) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value ) ** 2) * (cos(q_value ) ** 2)
    x_denominator = cos(sigma / 2 ) ** 2
    x_value = (sigma - sin(sigma )) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value ) ** 2) * (sin(q_value ) ** 2)
    y_denominator = sin(sigma / 2 ) ** 2
    y_value = (sigma + sin(sigma )) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
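
# Usage sketch: the function returns a distance in metres along the WGS84
# ellipsoid. The coordinates below (San Francisco to New York) are illustrative
# only; no exact value is asserted here since the result also depends on the
# haversine helper imported above.
#
#     >>> lamberts_ellipsoidal_distance(37.774856 , -122.424227 , 40.713019 , -74.012647 )  # doctest: +SKIP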
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase_ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": 5_12,
"facebook/dpr-ctx_encoder-multiset-base": 5_12,
}
UpperCAmelCase_ = {
"facebook/dpr-question_encoder-single-nq-base": 5_12,
"facebook/dpr-question_encoder-multiset-base": 5_12,
}
UpperCAmelCase_ = {
"facebook/dpr-reader-single-nq-base": 5_12,
"facebook/dpr-reader-multiset-base": 5_12,
}
UpperCAmelCase_ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase_ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
UpperCAmelCase_ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class __UpperCamelCase ( BertTokenizer ):
__A : Optional[int] = VOCAB_FILES_NAMES
__A : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__A : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Optional[int] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( BertTokenizer ):
__A : Dict = VOCAB_FILES_NAMES
__A : List[str] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
__A : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
UpperCAmelCase_ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
UpperCAmelCase_ = r"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class __UpperCamelCase :
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ):
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        if len(titles ) != len(texts ):
            raise ValueError(
                f'''There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.''' )
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ):
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' )
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' )
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class __UpperCamelCase ( A__ , A__ ):
__A : Dict = VOCAB_FILES_NAMES
__A : Dict = READER_PRETRAINED_VOCAB_FILES_MAP
__A : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Optional[Any] = READER_PRETRAINED_INIT_CONFIGURATION
    __A : Tuple = ["""input_ids""", """attention_mask"""]
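
# Usage sketch: the reader tokenizer defined above (its public name in
# transformers is DPRReaderTokenizer) packs one question against several
# (title, text) passages into a single batch:
#
#     >>> from transformers import DPRReaderTokenizer  # doctest: +SKIP
#     >>> tokenizer = DPRReaderTokenizer.from_pretrained('facebook/dpr-reader-single-nq-base' )  # doctest: +SKIP
#     >>> encoded = tokenizer(questions='What is love ?' , titles='Haddaway' , texts='What Is Love is a song...' , return_tensors='pt' )  # doctest: +SKIP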
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase = LDMTextToImagePipeline
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase = False
def __a ( self : Dict ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(_lowercase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vqvae': vae,
'bert': text_encoder,
'tokenizer': tokenizer,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
def __a ( self : Any ):
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = LDMTextToImagePipeline(**_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_dummy_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
A = np.array([0.6_1_0_1, 0.6_1_5_6, 0.5_6_2_2, 0.4_8_9_5, 0.6_6_6_1, 0.3_8_0_4, 0.5_7_4_8, 0.6_1_3_6, 0.5_0_1_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
def __a ( self : Union[str, Any] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 256, 256, 3)
A = np.array([0.5_1_8_2_5, 0.5_2_8_5_0, 0.5_2_5_4_3, 0.5_4_2_5_8, 0.5_2_3_0_4, 0.5_2_5_6_9, 0.5_4_3_6_3, 0.5_5_2_7_6, 0.5_6_8_7_8] )
A = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs( self , device , dtype=torch.float32 , seed=0 ):
        generator = torch.manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 32, 32) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'latents': latents,
            'generator': generator,
            'num_inference_steps': 50,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
def __a ( self : List[str] ):
A = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
A = self.get_inputs(_lowercase )
A = pipe(**_lowercase ).images[0]
A = load_numpy(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' )
A = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__a: List[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class UpperCAmelCase ( Pipeline ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
    def _sanitize_parameters( self , top_k=None , padding=None , truncation=None , **kwargs ):
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['''padding'''] = padding
        if truncation is not None:
            preprocess_params['''truncation'''] = truncation
        if top_k is not None:
            postprocess_params['''top_k'''] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , image , question = None , **kwargs ):
        if isinstance(image , (Image.Image, str) ) and isinstance(question , str ):
            inputs = {'''image''': image, '''question''': question}
        else:
            # Accepts a dict (or an iterable of dicts) with "image" and "question" keys
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results
    def preprocess( self , inputs , padding=False , truncation=False ):
        image = load_image(inputs['''image'''] )
        model_inputs = self.tokenizer(
            inputs['''question'''] , return_tensors=self.framework , padding=padding , truncation=truncation )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
        return model_inputs
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(F"""Unsupported framework: {self.framework}""" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
'''simple docstring'''
def naive_cut_rod_recursive(n , prices ):
    _enforce_args(n , prices )
    if n == 0:
        return 0
    max_revenue = float('''-inf''' )
    for i in range(1 , n + 1 ):
        max_revenue = max(
            max_revenue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
    return max_revenue
def top_down_cut_rod(n , prices ):
    _enforce_args(n , prices )
    max_rev = [float('''-inf''' ) for _ in range(n + 1 )]
    return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive(n , prices , max_rev ):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float('''-inf''' )
        for i in range(1 , n + 1 ):
            max_revenue = max(
                max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
        max_rev[n] = max_revenue
    return max_rev[n]
def bottom_up_cut_rod(n , prices ):
    _enforce_args(n , prices )
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float('''-inf''' ) for _ in range(n + 1 )]
    max_rev[0] = 0
    for i in range(1 , n + 1 ):
        max_revenue_i = max_rev[i]
        for j in range(1 , i + 1 ):
            max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
        max_rev[i] = max_revenue_i
    return max_rev[n]
def _enforce_args(n , prices ):
    if n < 0:
        msg = F"""n must be greater than or equal to 0. Got n = {n}"""
        raise ValueError(msg )
    if n > len(prices ):
        msg = (
            '''Each integral piece of rod must have a corresponding price. '''
            F"""Got n = {n} but length of prices = {len(prices )}"""
        )
        raise ValueError(msg )
def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices )
    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36
    max_rev_top_down = top_down_cut_rod(n , prices )
    max_rev_bottom_up = bottom_up_cut_rod(n , prices )
    max_rev_naive = naive_cut_rod_recursive(n , prices )
    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
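
# Worked example with the CLRS prices for rods of length 1..4: the optimum is
# two pieces of length 2 (5 + 5 = 10), and all three implementations agree:
#
#     >>> bottom_up_cut_rod(4 , [1, 5, 8, 9] )
#     10
#     >>> top_down_cut_rod(4 , [1, 5, 8, 9] )
#     10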
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _a (PipelineTool ):
'''simple docstring'''
    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']
    def setup( self ):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
    def encode( self , text , labels ):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
    def decode( self , outputs ):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
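
# Usage sketch: the tool above (obfuscated to `_a` here; its public transformers
# name is TextClassificationTool) takes a text plus a candidate label list and
# returns the most likely label. The labels below are illustrative:
#
#     >>> classifier = _a()  # doctest: +SKIP
#     >>> classifier('This is a super nice API!' , labels=['positive', 'negative'] )  # doctest: +SKIP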
import tensorflow as tf
from ...tf_utils import shape_list
class _a (tf.keras.layers.Layer ):
'''simple docstring'''
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ):
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
def __A ( self , A__ ):
if self.n_clusters > 0:
A__ : List[str] = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=A__ , name="""cluster_weight""" )
A__ : Union[str, Any] = self.add_weight(
shape=(self.n_clusters,) , initializer="""zeros""" , trainable=A__ , name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A__ : List[str] = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=A__ , name=F"""out_projs_._{i}""" , )
self.out_projs.append(A__ )
else:
self.out_projs.append(A__ )
A__ : int = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=A__ , name=F"""out_layers_._{i}_._weight""" , )
A__ : str = self.add_weight(
shape=(self.vocab_size,) , initializer="""zeros""" , trainable=A__ , name=F"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A__ : Optional[int] = self.d_embed // (self.div_val**i)
A__ : Optional[Any] = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=A__ , name=F"""out_projs_._{i}""" )
self.out_projs.append(A__ )
A__ : List[str] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=A__ , name=F"""out_layers_._{i}_._weight""" , )
A__ : Optional[int] = self.add_weight(
shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=A__ , name=F"""out_layers_._{i}_._bias""" , )
self.out_layers.append((weight, bias) )
super().build(A__ )
    @staticmethod
    def _logit( x , W , b , proj=None ):
        y = x
        if proj is not None:
            y = tf.einsum("""ibd,ed->ibe""" , y , proj )
        return tf.einsum("""ibd,nd->ibn""" , y , W ) + b
    @staticmethod
    def _gather_logprob( logprob , target ):
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
    def call( self , hidden , target , return_mean=True , training=False ):
A__ : str = 0
if self.n_clusters == 0:
A__ : List[str] = self._logit(A__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
A__ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=A__ , logits=A__ )
A__ : int = tf.nn.log_softmax(A__ , axis=-1 )
else:
A__ : Any = shape_list(A__ )
A__ : int = []
A__ : Union[str, Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A__ : Any = (target >= l_idx) & (target < r_idx)
A__ : Optional[Any] = tf.where(A__ )
A__ : int = tf.boolean_mask(A__ , A__ ) - l_idx
if self.div_val == 1:
A__ : Any = self.out_layers[0][0][l_idx:r_idx]
A__ : List[Any] = self.out_layers[0][1][l_idx:r_idx]
else:
A__ : Optional[int] = self.out_layers[i][0]
A__ : Tuple = self.out_layers[i][1]
if i == 0:
A__ : Union[str, Any] = tf.concat([cur_W, self.cluster_weight] , 0 )
A__ : Optional[int] = tf.concat([cur_b, self.cluster_bias] , 0 )
A__ : Any = self._logit(A__ , A__ , A__ , self.out_projs[0] )
A__ : Optional[Any] = tf.nn.log_softmax(A__ )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A__ : Optional[Any] = tf.boolean_mask(A__ , A__ )
A__ : List[str] = self._gather_logprob(A__ , A__ )
else:
A__ : str = self._logit(A__ , A__ , A__ , self.out_projs[i] )
A__ : Dict = tf.nn.log_softmax(A__ )
A__ : List[str] = self.cutoffs[0] + i - 1 # No probability for the head cluster
A__ : Optional[Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(A__ )
if target is not None:
A__ : Any = tf.boolean_mask(A__ , A__ )
A__ : List[Any] = tf.boolean_mask(A__ , A__ )
A__ : List[Any] = self._gather_logprob(A__ , A__ )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(A__ , -cur_logprob , shape_list(A__ ) )
A__ : List[str] = tf.concat(A__ , axis=-1 )
if target is not None:
if return_mean:
A__ : List[Any] = tf.reduce_mean(A__ )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(A__ )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(A__ , name=self.name , aggregation="""mean""" if return_mean else """""" )
return out
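
# Construction sketch (dimensions illustrative, taken from the Transformer-XL
# WikiText-103 defaults; the class above is obfuscated to `_a`, its public
# transformers name is TFAdaptiveSoftmaxMask):
#
#     >>> softmax = _a(vocab_size=267735 , d_embed=1024 , d_proj=1024 , cutoffs=[20000, 40000, 200000] , div_val=4 )  # doctest: +SKIP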
import numpy as np
def sigmoid(vector: np.ndarray ) -> np.ndarray:
    return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
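
# Worked example: sigmoid(0) = 1 / (1 + e^0) = 0.5, applied element-wise:
#
#     >>> sigmoid(np.array([-1.0, 0.0, 1.0] ) )
#     array([0.26894142, 0.5       , 0.73105858])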
def least_divisible_repunit(divisor: int ) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
def solution(limit: int = 1000000 ) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor ) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
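
# Worked example: A(7) = 6, because R(6) = 111111 = 7 * 15873 is the first
# repunit divisible by 7:
#
#     >>> least_divisible_repunit(7 )
#     6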
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case : Union[str, Any] = logging.get_logger(__name__)
_snake_case : Tuple = {"vocab_file": "spm_char.model"}
_snake_case : str = {
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
_snake_case : Tuple = {
"microsoft/speecht5_asr": 1_024,
"microsoft/speecht5_tts": 1_024,
"microsoft/speecht5_vc": 1_024,
}
class a (PreTrainedTokenizer ):
"""simple docstring"""
__UpperCAmelCase : str = VOCAB_FILES_NAMES
__UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
def __snake_case ( self : int ) -> Tuple:
return self.sp_model.get_piece_size()
def __snake_case ( self : List[str] ) -> Any:
__snake_case : List[str] = {self.convert_ids_to_tokens(lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ) -> Union[str, Any]:
__snake_case : Optional[int] = self.__dict__.copy()
__snake_case : Optional[Any] = None
return state
def __setstate__( self : int , lowerCamelCase : str ) -> Tuple:
__snake_case : Tuple = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
__snake_case : Tuple = {}
__snake_case : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )
def __snake_case ( self : Union[str, Any] , lowerCamelCase : Union[str, Any] ) -> Optional[int]:
return self.sp_model.piece_to_id(lowerCamelCase )
def __snake_case ( self : Tuple , lowerCamelCase : Dict ) -> Tuple:
__snake_case : List[Any] = self.sp_model.IdToPiece(lowerCamelCase )
return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + suffix_ones
        return ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
def __snake_case ( self : Any , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__snake_case : Optional[int] = os.path.join(
lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase , "wb" ) as fi:
__snake_case : int = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase )
return (out_vocab_file,)
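
# Usage sketch: the tokenizer above (its public transformers name is
# SpeechT5Tokenizer) is loaded from one of the checkpoints listed in the maps
# at the top of this file:
#
#     >>> from transformers import SpeechT5Tokenizer  # doctest: +SKIP
#     >>> tokenizer = SpeechT5Tokenizer.from_pretrained('microsoft/speecht5_tts' )  # doctest: +SKIP
#     >>> tokenizer('Hello, my dog is cute' ).input_ids  # doctest: +SKIP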
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a (ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : str = AutoencoderKL
__UpperCAmelCase : Optional[Any] = "sample"
__UpperCAmelCase : Optional[int] = 1e-2
@property
def __snake_case ( self : Dict ) -> Optional[Any]:
__snake_case : Optional[Any] = 4
__snake_case : Tuple = 3
__snake_case : List[str] = (32, 32)
__snake_case : str = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase )
return {"sample": image}
@property
def __snake_case ( self : Union[str, Any] ) -> Tuple:
return (3, 32, 32)
@property
def __snake_case ( self : int ) -> int:
return (3, 32, 32)
def __snake_case ( self : Optional[Any] ) -> Dict:
__snake_case : Optional[Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
__snake_case : Any = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : str ) -> Dict:
pass
def __snake_case ( self : Tuple ) -> List[str]:
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
    def test_gradient_checkpointing( self ):
        # enable deterministic behavior for gradient checkpointing
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1E-5 )
        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
    def test_from_pretrained_hub( self ):
        model , loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
def __snake_case ( self : Optional[Any] ) -> Union[str, Any]:
__snake_case : Tuple = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
__snake_case : Dict = model.to(lowerCamelCase )
model.eval()
if torch_device == "mps":
__snake_case : int = torch.manual_seed(0 )
else:
__snake_case : str = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__snake_case : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__snake_case : Union[str, Any] = image.to(lowerCamelCase )
with torch.no_grad():
__snake_case : str = model(lowerCamelCase , sample_posterior=lowerCamelCase , generator=lowerCamelCase ).sample
__snake_case : List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__snake_case : Union[str, Any] = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
__snake_case : Tuple = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
__snake_case : List[str] = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
self.assertTrue(torch_all_close(lowerCamelCase , lowerCamelCase , rtol=1E-2 ) )
@slow
class a (unittest.TestCase ):
"""simple docstring"""
    def get_file_format( self , seed , shape ):
        return F'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'
def __snake_case ( self : List[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fpaa=False ):
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
def __snake_case ( self : Optional[Any] , lowerCamelCase : int="CompVis/stable-diffusion-v1-4" , lowerCamelCase : int=False ) -> int:
__snake_case : str = "fp16" if fpaa else None
__snake_case : int = torch.floataa if fpaa else torch.floataa
__snake_case : int = AutoencoderKL.from_pretrained(
lowerCamelCase , subfolder="vae" , torch_dtype=lowerCamelCase , revision=lowerCamelCase , )
model.to(lowerCamelCase ).eval()
return model
    def get_generator( self , seed=0 ):
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def __snake_case ( self : List[str] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] ) -> List[Any]:
__snake_case : Optional[Any] = self.get_sd_vae_model()
__snake_case : List[Any] = self.get_sd_image(lowerCamelCase )
__snake_case : Tuple = self.get_generator(lowerCamelCase )
with torch.no_grad():
__snake_case : Optional[Any] = model(lowerCamelCase , generator=lowerCamelCase , sample_posterior=lowerCamelCase ).sample
assert sample.shape == image.shape
__snake_case : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case : int = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
def __snake_case ( self : Any , lowerCamelCase : List[str] , lowerCamelCase : List[str] ) -> Tuple:
__snake_case : Any = self.get_sd_vae_model(fpaa=lowerCamelCase )
__snake_case : List[Any] = self.get_sd_image(lowerCamelCase , fpaa=lowerCamelCase )
__snake_case : List[Any] = self.get_generator(lowerCamelCase )
with torch.no_grad():
__snake_case : str = model(lowerCamelCase , generator=lowerCamelCase , sample_posterior=lowerCamelCase ).sample
assert sample.shape == image.shape
__snake_case : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case : Any = torch.tensor(lowerCamelCase )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def __snake_case ( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : Dict ) -> int:
__snake_case : int = self.get_sd_vae_model()
__snake_case : List[Any] = self.get_sd_image(lowerCamelCase )
with torch.no_grad():
__snake_case : int = model(lowerCamelCase ).sample
assert sample.shape == image.shape
__snake_case : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
__snake_case : List[str] = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
def __snake_case ( self : List[str] , lowerCamelCase : Tuple , lowerCamelCase : Any ) -> Optional[Any]:
__snake_case : List[str] = self.get_sd_vae_model()
__snake_case : List[Any] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case : str = model.decode(lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case : str = sample[-1, -2:, :2, -2:].flatten().cpu()
__snake_case : Optional[int] = torch.tensor(lowerCamelCase )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
def __snake_case ( self : str , lowerCamelCase : Optional[int] , lowerCamelCase : Dict ) -> int:
__snake_case : int = self.get_sd_vae_model(fpaa=lowerCamelCase )
__snake_case : List[str] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase )
with torch.no_grad():
__snake_case : Union[str, Any] = model.decode(lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__snake_case : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
__snake_case : Optional[Any] = torch.tensor(lowerCamelCase )
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def __snake_case ( self : Tuple , lowerCamelCase : List[Any] ) -> Tuple:
__snake_case : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase )
__snake_case : Any = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) , fpaa=lowerCamelCase )
with torch.no_grad():
__snake_case : str = model.decode(lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case : Any = model.decode(lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def __snake_case ( self : List[Any] , lowerCamelCase : Any ) -> Optional[int]:
__snake_case : str = self.get_sd_vae_model()
__snake_case : Union[str, Any] = self.get_sd_image(lowerCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
__snake_case : List[Any] = model.decode(lowerCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__snake_case : Dict = model.decode(lowerCamelCase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
def __snake_case ( self : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Dict ) -> Optional[int]:
__snake_case : str = self.get_sd_vae_model()
__snake_case : int = self.get_sd_image(lowerCamelCase )
__snake_case : int = self.get_generator(lowerCamelCase )
with torch.no_grad():
__snake_case : Optional[Any] = model.encode(lowerCamelCase ).latent_dist
__snake_case : Dict = dist.sample(generator=lowerCamelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__snake_case : List[str] = sample[0, -1, -3:, -3:].flatten().cpu()
__snake_case : Dict = torch.tensor(lowerCamelCase )
__snake_case : Dict = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(lowerCamelCase , lowerCamelCase , atol=lowerCamelCase )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class a ( PretrainedConfig ):
"""simple docstring"""
lowerCamelCase :Dict = '''biogpt'''
    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1E-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> Union[str, Any]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
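
# Usage sketch: instantiate the configuration with its defaults and build a
# model from it (BioGptConfig / BioGptModel are the public transformers names
# for the classes referenced here):
#
#     >>> from transformers import BioGptConfig, BioGptModel  # doctest: +SKIP
#     >>> configuration = BioGptConfig()  # doctest: +SKIP
#     >>> model = BioGptModel(configuration )  # doctest: +SKIP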
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
_SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
_SCREAMING_SNAKE_CASE = {'facebook/blenderbot_small-90M': 512}
def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs )
    return pairs
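
# Worked example: the symbol pairs of a BPE "word" (a tuple of symbols):
#
#     >>> sorted(get_pairs(('l', 'o', 'w', '</w>') ) )
#     [('l', 'o'), ('o', 'w'), ('w', '</w>')]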
class a ( PreTrainedTokenizer ):
"""simple docstring"""
lowerCamelCase :List[Any] = VOCAB_FILES_NAMES
lowerCamelCase :Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase :int = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="__start__" , lowerCAmelCase_="__end__" , lowerCAmelCase_="__unk__" , lowerCAmelCase_="__null__" , **lowerCAmelCase_ , ) -> int:
super().__init__(unk_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , **lowerCAmelCase_ )
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as vocab_handle:
_A = json.load(lowerCAmelCase_ )
_A = {v: k for k, v in self.encoder.items()}
with open(lowerCAmelCase_ , encoding="""utf-8""" ) as merges_handle:
_A = merges_handle.read().split("""\n""" )[1:-1]
_A = [tuple(merge.split() ) for merge in merges]
_A = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_A = {}
@property
def UpperCAmelCase ( self ) -> int:
return len(self.encoder )
def UpperCAmelCase ( self ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break

                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word

                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)

        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
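# Illustration (added; not part of the original file): with a merge table that
# contains only the pair ("l", "l"), `bpe("hello")` splits the word into
# characters with a trailing "</w>", applies the single merge, and re-joins
# the pieces with the "@@ " continuation marker:
#
#     ("h", "e", "l", "l", "o</w>") -> ("h", "e", "ll", "o</w>") -> "h@@ e@@ ll@@ o"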
| 83 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode`, holding the (un-quantized) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 3, sample_size: int = 32, num_vq_embeddings: int = 2_56, norm_num_groups: int = 32, vq_embed_dim: Optional[int] = None, scaling_factor: float = 0.18_215, norm_type: str = "group"):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False)

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type)

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
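# Minimal smoke-test sketch (added for illustration; shapes assume the default
# single-block configuration above, and the module's relative imports mean it
# must be used from within the installed package):
#
#     model = VQModel()
#     image = torch.randn(1, 3, 32, 32)
#     with torch.no_grad():
#         reconstruction = model(image).sample  # expected: same shape as `image`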
| 4 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Use the narrowest integer dtype that can hold every token id.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
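# Example invocation (added for illustration; file names are placeholders):
#
#     python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#         --tokenizer_name bert-base-uncased --dump_file data/binarized_text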
| 4 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
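# Hypothetical usage sketch (added; `PipelineTool` instances are callable, and
# the image path below is a placeholder):
#
#     from PIL import Image
#     tool = ImageQuestionAnsweringTool()
#     answer = tool(Image.open("photo.jpg"), "What is in the picture?")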
| 291 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate the payload is an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
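# Example invocation (added for illustration; the prompt and paths are placeholders):
#
#     python retrieve.py --class_prompt "photo of a dog" \
#         --class_data_dir ./real_reg/dog --num_class_images 200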
| 291 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
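# Example invocation (added for illustration; the script and checkpoint names
# are placeholders):
#
#     python convert_unispeech_sat.py --checkpoint_path ./unispeech_sat.pt \
#         --pytorch_dump_folder_path ./unispeech-sat-hf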
| 15 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class Automaton:
    """Builds the keyword trie with failure links, then searches a string in one pass."""

    def __init__(self, keywords: list[str]):
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        """
        >>> A = Automaton(["what", "hat", "ver", "er"])
        >>> A.search_in("whatever, err ... , wherever")
        {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}
        """
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
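    # Small usage demonstration (added for illustration):
    automaton = Automaton(["he", "she", "his", "hers"])
    print(automaton.search_in("ushers"))  # {'she': [1], 'he': [2], 'hers': [2]}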
| 602 | 0 |
'''Configuration class for the NLLB-MoE (mixture-of-experts) translation model.'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=128_112, max_position_embeddings=1_024, encoder_layers=12, encoder_ffn_dim=4_096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4_096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1_024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs)
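# Usage sketch (added for illustration; assumes the class is imported from the
# installed `transformers` package, since this module uses relative imports):
#
#     from transformers import NllbMoeConfig
#     config = NllbMoeConfig(num_experts=8, expert_capacity=32)
#     print(config.router_dtype)  # "float32", the default above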
| 711 |
| 591 | 0 |
'''IPv4 address validation.'''
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    '''Return True when the string is a dotted-quad IPv4 address with each octet in 0-255.'''
    octets = [int(i) for i in ip_v4_address.split('.') if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
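    # Quick sanity checks (added for illustration):
    #   is_ip_v4_address_valid("192.168.0.23")  -> True
    #   is_ip_v4_address_valid("256.1.2.9")     -> False  (octet out of range)
    #   is_ip_v4_address_valid("192.168.0")     -> False  (only three octets)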
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""") | 189 |
'''Minimax search over a perfect binary tree of leaf scores.'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    '''Return the game value at `node_index`, alternating max/min levels down the tree.'''
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
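    # With these 8 leaves (height = log2(8) = 3) the recursion evaluates
    # max(min(max(90, 23), max(6, 33)), min(max(21, 65), max(123, 34423)))
    # = max(33, 65) = 65 (added note).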
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
| 189 | 1 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    # `step_rules` looks like "1:10,0.1:20,0.01:30,0.005": a multiplier of 1
    # until step 10, 0.1 until step 20, 0.01 until step 30, then 0.005.
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name: Union[str, SchedulerType], optimizer: Optimizer, step_rules: Optional[str] = None, num_warmup_steps: Optional[int] = None, num_training_steps: Optional[int] = None, num_cycles: int = 1, power: float = 1.0, last_epoch: int = -1):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
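# Usage sketch (added for illustration; this module uses relative imports, so
# it is assumed to be imported from within its package; only standard torch
# APIs appear below):
#
#     model = torch.nn.Linear(2, 2)
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
#     lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
#     for _ in range(100):
#         optimizer.step()
#         lr_scheduler.step()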
| 169 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Nearest-neighbour image resizing: each destination pixel copies its closest source pixel."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column back to the source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row back to the source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
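    # Pure-NumPy sanity check (added for illustration; runs without an image
    # file): upscale a 2x2 checkerboard to 4x4 with the class above.
    checker = np.zeros((2, 2, 3), np.uint8)
    checker[0, 1] = checker[1, 0] = 255
    demo = NearestNeighbour(checker, 4, 4)
    demo.process()
    print(demo.output[:, :, 0])  # each source pixel becomes a 2x2 block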
| 169 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 328 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align PATH_TO_DIFFUSERS in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend(' if not is_torch_available():')
        self.assertEqual(simple_backend, 'torch')
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(' if not (is_torch_available() and is_transformers_available()):')
        self.assertEqual(double_backend, 'torch_and_transformers')
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):')
        self.assertEqual(triple_backend, 'torch_and_transformers_and_onnx')
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn('torch', objects)
        self.assertIn('torch_and_transformers', objects)
        self.assertIn('flax_and_transformers', objects)
        self.assertIn('torch_and_transformers_and_onnx', objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn('UNet2DModel', objects['torch'])
        self.assertIn('FlaxUNet2DConditionModel', objects['flax'])
        self.assertIn('StableDiffusionPipeline', objects['torch_and_transformers'])
        self.assertIn('FlaxStableDiffusionPipeline', objects['flax_and_transformers'])
        self.assertIn('LMSDiscreteScheduler', objects['torch_and_scipy'])
        self.assertIn('OnnxStableDiffusionPipeline', objects['torch_and_transformers_and_onnx'])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object('CONSTANT', '\'torch\'')
        self.assertEqual(dummy_constant, '\nCONSTANT = None\n')
        dummy_function = create_dummy_object('function', '\'torch\'')
        self.assertEqual(
            dummy_function, '\ndef function(*args, **kwargs):\n    requires_backends(function, \'torch\')\n')
        expected_dummy_class = '\nclass FakeClass(metaclass=DummyObject):\n    _backends = \'torch\'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, \'torch\')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, \'torch\')\n'
        dummy_class = create_dummy_object('FakeClass', '\'torch\'')
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = ["torch"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, ["torch"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, ["torch"])\n'
        dummy_files = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']})
        self.assertEqual(dummy_files['torch'], expected_dummy_pytorch_file)
| 328 | 1 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
lowercase = {"UserAgent": UserAgent().random}
def __UpperCAmelCase ( a_):
snake_case_ = script.contents[0]
snake_case_ = json.loads(data[data.find('{\"config\"') : -1])
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Scrape public profile information for an Instagram user."""
    def __init__(self, username) -> None:
        self.url = F'''https://www.instagram.com/{username}/'''
        self.user_data = self.get_json()
    def get_json(self) -> dict:
        """Return a dict of the scraped user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, 'html.parser').find_all('script')
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return F'''{self.__class__.__name__}(\'{self.username}\')'''
    def __str__(self) -> str:
        return F'''{self.fullname} ({self.username}) is {self.biography}'''
    @property
    def username(self) -> str:
        return self.user_data["username"]
    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]
    @property
    def biography(self) -> str:
        return self.user_data["biography"]
    @property
    def email(self) -> str:
        return self.user_data["business_email"]
    @property
    def website(self) -> str:
        return self.user_data["external_url"]
    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]
    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]
    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]
    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]
    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]
    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def __UpperCAmelCase ( a_ = "github"):
import os
if os.environ.get('CI'):
return # test failing on GitHub Actions
snake_case_ = InstagramUser(lowercase_)
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowercase_)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.')
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase = InstagramUser("github")
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }')
| 707 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    """Extra arguments for sequence-to-sequence fine-tuning on top of TrainingArguments."""
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={'help': 'The label smoothing epsilon to apply (if not zero).'})
    sortish_sampler: bool = field(default=False, metadata={'help': 'Whether to SortishSamler or not.'})
    predict_with_generate: bool = field(
        default=False, metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'})
    adafactor: bool = field(default=False, metadata={'help': 'whether to use adafactor'})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'})
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'})
    dropout: Optional[float] = field(default=None, metadata={'help': 'Dropout probability. Goes into model.config.'})
    attention_dropout: Optional[float] = field(
        default=None, metadata={'help': 'Attention dropout probability. Goes into model.config.'})
    lr_scheduler: Optional[str] = field(
        default='linear', metadata={'help': F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''}, )
| 607 | 0 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TensorFlow C++ logging before the TF import below
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
    import torch
    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)
try:
    import deepspeed
    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)
try:
    import tensorflow as tf
    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
| 291 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into its feature matrix and target vector
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    # Load the Iris dataset and split it into train/test sets
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true", )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
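# Note (added): plt.show() opens an interactive window; on a headless machine,
# select a non-interactive backend first (e.g. matplotlib.use("Agg")).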
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main() | 45 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig from the checkpoint name."""
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    """Map an original checkpoint key to the HF model's parameter name."""
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers', 'encoder.stages')
    if "downsample.proj" in name:
        name = name.replace('downsample.proj', 'downsample.projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f', 'modulation.projection_in')
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h', 'modulation.projection_context')
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj', 'modulation.projection_out')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'focalnet.' + name
    return name
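# Illustrative trace (added): under the rules above,
# "layers.0.blocks.1.norm1.weight" -> "encoder.layers.0.blocks.1.norm1.weight"
# -> "encoder.stages.0.blocks.1..." -> "focalnet.encoder.stages.0.layers.1.norm1.weight".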
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint weights to our FocalNet structure."""
    # fmt: off
    model_name_to_url = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('Checkpoint URL: ', checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
    # verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True,
        size={'shortest_edge': 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='pt')
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('Predicted class:', model.config.id2label[predicted_class_idx])
    print('First values of logits:', outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"""Pushing model and processor of {model_name} to the hub...""")
        model.push_to_hub(F"""{model_name}""")
        processor.push_to_hub(F"""{model_name}""")
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
UpperCAmelCase_ : Optional[int] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 443 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = 'src/transformers'
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(R'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(R'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(R'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(R'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(R'\[([^\]]+)\]')
def get_indent(line):
    """Return the indentation of `line`."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into its indented blocks, starting at `indent_level`."""
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + ' '):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """Wrap `key` (a function mapping an object to a string) to ignore case and underscores."""
    def _inner(x):
        return key(x).lower().replace('_', '')
    return _inner
def sort_objects(objects, key=None):
    """Sort a list of `objects` following the rules of isort; `key` optionally maps an object to a str."""
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
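# Illustrative (added): sort_objects(["foo", "Bar", "BAZ"]) returns
# ["BAZ", "Bar", "foo"] -- constants first, then classes, then functions.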
def sort_objects_in_import(import_statement):
    """Return the same `import_statement` but with objects properly sorted."""
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F"""[{imports}]"""
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(keys)]) + "]"
    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ', '.join([F"""\"{k}\"""" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    """Sort the imports defined in the `_import_structure` of a given init file."""
    with open(file, encoding='utf-8') as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:')
    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(F"""Overwriting {file}.""")
            with open(file, 'w', encoding='utf-8') as f:
                f.write('\n'.join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Sort the imports in all the `__init__.py` files found under the transformers source tree."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                failures = [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(F"""Would overwrite {len(failures)} files, run `make style`.""")
if __name__ == "__main__":
UpperCAmelCase_ : int = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
UpperCAmelCase_ : Tuple = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 443 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
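# Illustrative (added): with the default "cosine" transform, the returned betas
# grow with the timestep index and are capped at max_beta.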
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Scheduler inspired by DPM-Solver-2 and Algorithm 2 of the Karras et al. (2022) paper."""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas=None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep, ):
        """Scale the model input by `1 / (sigma**2 + 1) ** 0.5` for the current sigma."""
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None, ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(device).startswith('mps'):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict: bool = True, ):
        """Predict the sample at the previous timestep by reversing the SDE."""
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError('prediction_type not implemented yet: sample')
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps, ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
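# Hedged usage sketch (added): this scheduler is normally driven by a diffusers
# pipeline; a bare denoising loop looks roughly like
#   scheduler.set_timesteps(num_inference_steps=10)
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample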
| 114 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w', 'e r</w>', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))
    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = 'This is a simple input'
                s2 = ['This is a simple input 1', 'This is a simple input 2']
                p = ('This is a simple input', 'This is a pair')
                p2 = [
                    ('This is a simple input 1', 'This is a simple input 2'),
                    ('This is a simple pair 1', 'This is a simple pair 2'),
                ]
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding='max_length')
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding='max_length', )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding='max_length')
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding='max_length', )
    def test_padding_different_model_input_name(self):
        pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Runs the same tests with spaCy and ftfy available."""
    pass
| 114 | 1 |
"""simple docstring"""
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(tree: TreeNode | None) -> bool:
    # Validation
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)
    if not is_valid_tree(tree):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')
    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )
    return is_binary_search_tree_recursive_check(tree, -float('inf'), float('inf'))
if __name__ == "__main__":
import doctest
doctest.testmod()
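    # Hedged usage sketch (added): 2 <- 5 -> 10 satisfies the BST invariant,
    # while swapping the children violates it.
    print(is_binary_search_tree(TreeNode(5.0, TreeNode(2.0), TreeNode(10.0))))  # True
    print(is_binary_search_tree(TreeNode(5.0, TreeNode(10.0), TreeNode(2.0))))  # False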
| 714 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use PoolFormerImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 439 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def _A ( _a : Sequence[float] , _a : float ):
"""simple docstring"""
return sum(c * (x**i) for i, c in enumerate(_a ) )
def _A ( _a : Sequence[float] , _a : float ):
"""simple docstring"""
A = 0.0
for coeff in reversed(_a ):
A = result * x + coeff
return result
if __name__ == "__main__":
UpperCAmelCase =(0.0, 0.0, 5.0, 9.3, 7.0)
UpperCAmelCase =10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
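    # Sanity check (added): both evaluation orders agree up to float rounding;
    # Horner just refactors a0 + a1*x + a2*x**2 + ... as a0 + x*(a1 + x*(a2 + ...)).
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-9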
| 617 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 617 | 1 |
__UpperCAmelCase = "Tobias Carryer"
from time import time
class a_:
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int=int(time())) -> str: # noqa: B008
"""simple docstring"""
SCREAMING_SNAKE_CASE = multiplier
SCREAMING_SNAKE_CASE = increment
SCREAMING_SNAKE_CASE = modulo
SCREAMING_SNAKE_CASE = seed
def __UpperCamelCase ( self : Tuple) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
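    # Note (added): multiplier 1664525, increment 1013904223 and modulo 2**32
    # (written as 2 << 31) are the classic Numerical Recipes LCG constants.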
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
| 720 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__UpperCAmelCase = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class a_( unittest.TestCase ):
"""simple docstring"""
__snake_case : Union[str, Any] =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
__snake_case : Dict =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
__snake_case : Optional[Any] ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
__snake_case : Any ={
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='pt')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
        outputs = text_classifier('This is great !', top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}])
        outputs = text_classifier(['This is great !', 'This is bad'], top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ], )
        outputs = text_classifier('This is great !', top_k=1)
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
        # Legacy behavior
        outputs = text_classifier('This is great !', return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
        outputs = text_classifier('This is great !', return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]])
        outputs = text_classifier(['This is great !', 'Something else'], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
                [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}],
            ], )
        outputs = text_classifier(['This is great !', 'Something else'], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs), [
                {'label': 'LABEL_0', 'score': 0.504},
                {'label': 'LABEL_0', 'score': 0.504},
            ], )
    @require_torch
    def test_accepts_torch_device(self):
        import torch
        text_classifier = pipeline(
            task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='pt', device=torch.device('cpu'), )
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task='text-classification', model='hf-internal-testing/tiny-random-distilbert', framework='tf')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'LABEL_0', 'score': 0.504}])
    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline('text-classification')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 1.0}])
        outputs = text_classifier('This is bad !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'NEGATIVE', 'score': 1.0}])
        outputs = text_classifier('Birds are a type of animal')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 0.988}])
    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline('text-classification', framework='tf')
        outputs = text_classifier('This is great !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 1.0}])
        outputs = text_classifier('This is bad !')
        self.assertEqual(nested_simplify(outputs), [{'label': 'NEGATIVE', 'score': 1.0}])
        outputs = text_classifier('Birds are a type of animal')
        self.assertEqual(nested_simplify(outputs), [{'label': 'POSITIVE', 'score': 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = 'HuggingFace is in'
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{'label': ANY(str), 'score': ANY(float)}])
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
        valid_inputs = ['HuggingFace is in ', 'Paris is in France']
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), [{'label': ANY(str), 'score': ANY(float)}, {'label': ANY(str), 'score': ANY(float)}], )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
        self.assertTrue(outputs[1]['label'] in model.config.id2label.values())
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs), [[{'label': ANY(str), 'score': ANY(float)}] * N, [{'label': ANY(str), 'score': ANY(float)}] * N], )
        valid_inputs = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs), {'label': ANY(str), 'score': ANY(float)}, )
        self.assertTrue(outputs['label'] in model.config.id2label.values())
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [['HuggingFace is in ', 'Paris is in France']]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[['HuggingFace is in ', 'Paris is in France']]])
        self.assertEqual(
            nested_simplify(outputs), [{'label': ANY(str), 'score': ANY(float)}], )
        self.assertTrue(outputs[0]['label'] in model.config.id2label.values())
| 259 | 0 |
"""simple docstring"""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> int:
return int((input_a, input_a).count(1 ) != 0 )
def lowerCAmelCase_ ( ) -> None:
assert or_gate(0, 0 ) == 0
assert or_gate(0, 1 ) == 1
assert or_gate(1, 0 ) == 1
assert or_gate(1, 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1)) | 237 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json",
"uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json",
"uclanlp/visualbert-vqa-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json",
"uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json",
"uclanlp/visualbert-vcr-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json"
),
"uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json",
"uclanlp/visualbert-nlvr2-coco-pre": (
"https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json"
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class snake_case_ ( lowerCAmelCase ):
__lowerCamelCase : List[str] = 'visual_bert'
    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 345 | 0 |
def simplify(current_set: list[list]) -> list[list]:
    """Normalise each row by its leading term, subtract the first row from the
    others, then recurse on the reduced system."""
    # Divide each row by magnitude of first term --> creates 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """Solve a system of n linear equations given as n lists of length n + 1
    (coefficients followed by the constant term)."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
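# A quick sanity check (an illustrative sketch, not part of the original file).
# simplify() mutates its input rows in place, so verify against a deep copy:
#
#     import copy
#     system = copy.deepcopy(eq)
#     solution = solve_simultaneous(system)
#     for row in eq:
#         lhs = sum(coeff * x for coeff, x in zip(row[:-1], solution))
#         assert abs(lhs - row[-1]) < 1e-6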
| 373 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """Wraps the CLIP processor so the image-processing step stays in torch
    and keeps gradients flowing (the stock processor converts to PIL)."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
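# Why a custom processor: the stock CLIP processor round-trips through PIL,
# which severs autograd. A minimal check that gradients survive this one
# (illustrative only; shapes and values are arbitrary):
#
#     import torch
#     proc = ProcessorGradientFlow(device="cpu")
#     img = torch.rand(1, 3, 256, 256, requires_grad=True)
#     proc.preprocess_img(img).sum().backward()
#     assert img.grad is not None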
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Stitch the images saved during generation into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        frame_durations = [frame_duration] * len(paths)
        if extend_frames:
            frame_durations[0] = 1.5
            frame_durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=frame_durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector transform to the base latent and return the decoded image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)
        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image from the given prompts, optionally starting from image_path."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path
        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))
        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
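# Illustrative end-to-end usage (a sketch; the config/checkpoint paths are
# placeholders, not real files, and `face.png` is a hypothetical input):
#
#     editor = VQGAN_CLIP(vqgan_config="./vqgan.yaml", vqgan_checkpoint="./vqgan.ckpt")
#     editor.generate(
#         pos_prompts="a smiling face:1.0 | bright eyes:0.5",
#         image_path="./face.png",
#         save_intermediate=True,
#     )
#     editor.make_animation()  # stitches the saved frames into animation.gif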
| 373 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : int ) -> Any:
"""simple docstring"""
_lowerCamelCase : Any =TFConvBertModel(config=lowercase_ )
_lowerCamelCase : Optional[int] ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_lowerCamelCase : int =[input_ids, input_mask]
_lowerCamelCase : List[str] =model(lowercase_ )
_lowerCamelCase : Optional[int] =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self : Dict , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : str =TFConvBertForMaskedLM(config=lowercase_ )
_lowerCamelCase : str ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCamelCase : List[Any] =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : List[str] ) -> List[Any]:
"""simple docstring"""
_lowerCamelCase : Tuple =self.num_labels
_lowerCamelCase : Union[str, Any] =TFConvBertForSequenceClassification(config=lowercase_ )
_lowerCamelCase : Dict ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCamelCase : List[str] =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self : Dict , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Union[str, Any] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[str] =self.num_choices
_lowerCamelCase : Tuple =TFConvBertForMultipleChoice(config=lowercase_ )
_lowerCamelCase : List[str] =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : Tuple =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : Optional[Any] =tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) )
_lowerCamelCase : List[str] ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_lowerCamelCase : Dict =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self : str , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] ) -> str:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] =self.num_labels
_lowerCamelCase : Tuple =TFConvBertForTokenClassification(config=lowercase_ )
_lowerCamelCase : Any ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCamelCase : List[Any] =model(lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[str] ) -> str:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =TFConvBertForQuestionAnswering(config=lowercase_ )
_lowerCamelCase : Dict ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_lowerCamelCase : Any =model(lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def lowerCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : List[Any] =True
_lowerCamelCase : List[str] =True
if hasattr(lowercase_ , 'use_cache' ):
_lowerCamelCase : Any =True
_lowerCamelCase : List[str] =getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_lowerCamelCase : Tuple =getattr(self.model_tester , 'key_length' , lowercase_ )
for model_class in self.all_model_classes:
_lowerCamelCase : Tuple =self._prepare_for_class(lowercase_ , lowercase_ )
_lowerCamelCase : List[Any] =model_class(lowercase_ )
_lowerCamelCase : Any =len(model(lowercase_ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ , saved_model=lowercase_ )
_lowerCamelCase : Any =os.path.join(lowercase_ , 'saved_model' , '1' )
_lowerCamelCase : List[Any] =tf.keras.models.load_model(lowercase_ )
_lowerCamelCase : Tuple =model(lowercase_ )
if self.is_encoder_decoder:
_lowerCamelCase : Dict =outputs['encoder_hidden_states']
_lowerCamelCase : Optional[int] =outputs['encoder_attentions']
else:
_lowerCamelCase : List[str] =outputs['hidden_states']
_lowerCamelCase : Dict =outputs['attentions']
self.assertEqual(len(lowercase_ ) , lowercase_ )
_lowerCamelCase : str =getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowercase_ ) , lowercase_ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
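        # ConvBERT's mixed-attention block replaces half of the self-attention
        # heads with span-based dynamic convolution (head_ratio=2 in the default
        # config), which is presumably why num_attention_heads / 2 is expected here.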
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)
def lowerCamelCase ( self : List[str] ) -> int:
"""simple docstring"""
_lowerCamelCase , _lowerCamelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Union[str, Any] =True
_lowerCamelCase : List[Any] =getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
_lowerCamelCase : Optional[int] =getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_lowerCamelCase : List[str] =getattr(self.model_tester , 'key_length' , lowercase_ )
_lowerCamelCase : int =getattr(self.model_tester , 'key_length' , lowercase_ )
def check_decoder_attentions_output(lowercase_ : Any ):
_lowerCamelCase : Tuple =len(lowercase_ )
self.assertEqual(out_len % 2 , 0 )
_lowerCamelCase : List[Any] =outputs.decoder_attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowercase_ : Any ):
_lowerCamelCase : List[str] =[
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] =True
_lowerCamelCase : Optional[Any] =False
_lowerCamelCase : Union[str, Any] =model_class(lowercase_ )
_lowerCamelCase : Optional[int] =model(self._prepare_for_class(lowercase_ , lowercase_ ) )
_lowerCamelCase : Union[str, Any] =len(lowercase_ )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
if self.is_encoder_decoder:
_lowerCamelCase : Any =model_class(lowercase_ )
_lowerCamelCase : Optional[Any] =model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_decoder_attentions_output(lowercase_ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCamelCase : str =True
_lowerCamelCase : Any =model_class(lowercase_ )
_lowerCamelCase : List[Any] =model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
# Check attention is always last and order is fine
_lowerCamelCase : Optional[int] =True
_lowerCamelCase : Dict =True
_lowerCamelCase : Union[str, Any] =model_class(lowercase_ )
_lowerCamelCase : List[Any] =model(self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase_ ) )
self.assertEqual(model.config.output_hidden_states , lowercase_ )
check_encoder_attentions_output(lowercase_ )
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
| 464 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'prophetnet.tokenizer'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/xprophetnet-large-wiki100-cased': (
            'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
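# Illustrative vocab-file format consumed by load_vocab: one token per line,
# with the line number becoming the id (a sketch, not a real checkpoint file):
#
#     with open("toy_vocab.txt", "w", encoding="utf-8") as f:
#         f.write("[PAD]\n[CLS]\n[SEP]\nhello\nworld\n")
#     vocab = load_vocab("toy_vocab.txt")
#     assert vocab["hello"] == 3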
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}

        for i in range(10):
            tok = f'[unused{i}]'
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece'
' pip install sentencepiece' )
raise
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
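# Illustrative usage (a sketch; assumes network access to the checkpoint named
# in PRETRAINED_VOCAB_FILES_MAP above):
#
#     tok = XLMProphetNetTokenizer.from_pretrained(
#         "microsoft/xprophetnet-large-wiki100-cased"
#     )
#     ids = tok("Hello world")["input_ids"]
#     assert ids[-1] == tok.sep_token_id  # sequences are closed with [SEP]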
| 464 | 1 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFMobileBertModel,
            "fill-mask": TFMobileBertForMaskedLM,
            "question-answering": TFMobileBertForQuestionAnswering,
            "text-classification": TFMobileBertForSequenceClassification,
            "token-classification": TFMobileBertForTokenClassification,
            "zero-shot": TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)

        return inputs_dict
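        # TFMobileBertForPreTraining carries a next-sentence-prediction head, so
        # the shared test harness needs a dummy `next_sentence_label` batch when
        # labels are requested; all-zeros suffices since only shapes are checked.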
class TFMobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        embedding_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.embedding_size = embedding_size
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            embedding_size=self.embedding_size,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> int:
SCREAMING_SNAKE_CASE_ = TFMobileBertModel(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = [input_ids, input_mask]
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = TFMobileBertForMaskedLM(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> List[str]:
SCREAMING_SNAKE_CASE_ = TFMobileBertForNextSentencePrediction(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = TFMobileBertForPreTraining(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = TFMobileBertForSequenceClassification(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Any:
SCREAMING_SNAKE_CASE_ = self.num_choices
SCREAMING_SNAKE_CASE_ = TFMobileBertForMultipleChoice(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ = tf.tile(tf.expand_dims(UpperCamelCase__ , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> List[str]:
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = TFMobileBertForTokenClassification(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self , _A , _A , _A , _A , _A , _A , _A ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = TFMobileBertForQuestionAnswering(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
SCREAMING_SNAKE_CASE_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        # for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1E-4)
| 711 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "wavlm"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
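# Illustrative check of the property (a sketch, not part of the original file):
# with the default conv_stride of (5, 2, 2, 2, 2, 2, 2), the feature extractor
# downsamples raw audio by 5 * 2**6 = 320 samples per output frame, so
#
#     config = WavLMConfig()
#     assert config.inputs_to_logits_ratio == 320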
| 597 | 0 |
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """Wraps the CLIP processor so the image-processing step stays in torch
    and keeps gradients flowing (the stock processor converts to PIL)."""

    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        """Stitch the images saved during generation into a gif."""
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        frame_durations = [frame_duration] * len(paths)
        if extend_frames:
            frame_durations[0] = 1.5
            frame_durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=frame_durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a vector transform to the base latent and return the decoded image."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            # wandb.log takes a dict of named values
            wandb.log({"Original Image": wandb.Image(image)})
    def process_prompts(self, prompts):
        # Accepts a "|"-separated string ("cat:1.0 | dog:0.5"), a list of
        # strings, or a list of (prompt, weight) pairs; returns the prompts
        # plus a weight tensor on the right device.
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Edit the image at `image_path` (or a random latent) toward the
        positive prompts and away from the negative ones."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
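

# --- Illustrative usage sketch (not part of the original file) ---
# Assumes the enclosing class is `VQGAN_CLIP`, as in transformers'
# vqgan-clip research project, and that a VQGAN config/checkpoint exist at
# the hypothetical paths below.
if __name__ == "__main__":
    editor = VQGAN_CLIP(
        iterations=10,
        lr=0.01,
        vqgan_config="./vqgan_config.yaml",  # hypothetical path
        vqgan_checkpoint="./vqgan.ckpt",  # hypothetical path
    )
    # Prompts accept "text:weight" syntax; "|" separates several prompts.
    editor.generate(
        pos_prompts="a smiling face:1.0 | blue eyes:0.5",
        neg_prompts="a frowning face",
        show_intermediate=False,
        show_final=False,
        save_intermediate=True,
    )
    editor.make_animation(output_path="./animation.gif")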
| 288 |
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
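

# --- Illustrative sketch (not part of the original test file) ---
# A quick manual tour of the utilities under test: parse a backend guard line,
# then render the dummy placeholder `make fix-copies` would generate for it.
if __name__ == "__main__":
    print(find_backend("    if not is_torch_available():"))  # -> "torch"
    print(create_dummy_object("FakeClass", '["torch"]'))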
| 288 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing: an iterable dataset whose length is random.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Each shard must report the same number of batches as its expected list.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
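

# --- Illustrative sketch (not part of the original test file) ---
# The tests above pin down the sharding contract; concretely, two processes
# each see every other batch of the wrapped sampler:
if __name__ == "__main__":
    sampler = BatchSampler(range(12), batch_size=2, drop_last=False)
    for process_index in range(2):
        shard = BatchSamplerShard(sampler, 2, process_index)
        print(process_index, list(shard))
    # 0 [[0, 1], [4, 5], [8, 9]]
    # 1 [[2, 3], [6, 7], [10, 11]]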
| 372 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; outputs a bit tensor in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    """Expects bits in {-1, 1}; outputs an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
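

def _check_bits_round_trip():
    # Illustrative sanity check (not part of the original file): the two
    # converters above round-trip exactly, since 8 bits cover every value of
    # an image quantized to 0..255.
    x = torch.rand(1, 3, 4, 4)
    bits = decimal_to_bits(x)  # shape (1, 24, 4, 4), values in {-1, 1}
    restored = bits_to_decimal(bits)  # back in [0, 1]
    assert torch.allclose(restored, (x * 255).int().float() / 255)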
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    """DDIM step that clips the predicted x_0 to [-bit_scale, bit_scale]."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-detail understanding.
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    """DDPM step that clips the predicted x_0 to [-bit_scale, bit_scale]."""
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # The patched step functions read `self.bit_scale` and the scheduler's
        # internal state, so bind the matching one to the scheduler instance.
        scheduler.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step.__get__(scheduler)
            if isinstance(scheduler, DDIMScheduler)
            else ddpm_bit_scheduler_step.__get__(scheduler)
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            model_output = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(model_output, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
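

# --- Illustrative usage sketch (not part of the original file) ---
# Sampling only makes sense with a UNet trained on the bit representation
# (in_channels == 3 * BITS). The checkpoint path below is a hypothetical
# placeholder, not a published model.
if __name__ == "__main__":
    unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # hypothetical
    pipe = BitDiffusion(unet=unet, scheduler=DDIMScheduler(), bit_scale=1.0)
    image = pipe(height=256, width=256, num_inference_steps=50).images[0]
    image.save("bit_diffusion_sample.png")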
| 372 | 1 |
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sum all node values of a binary tree via depth-first search."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
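
    # Illustrative example (not in the original file): sum a small tree.
    #        10
    #       /  \
    #      5    -3
    root = Node(10)
    root.left = Node(5)
    root.right = Node(-3)
    print(list(BinaryTreeNodeSum(root)))  # -> [12]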
| 248 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
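

# --- Illustrative sketch (not part of the original file) ---
# `ORT_TO_NP_TYPE` (name as used in diffusers' ONNX pipelines) maps
# onnxruntime's element-type strings, e.g. `session.get_outputs()[0].type`,
# to numpy dtypes:
if __name__ == "__main__":
    print(ORT_TO_NP_TYPE["tensor(float)"])  # <class 'numpy.float32'>
    print(ORT_TO_NP_TYPE["tensor(int64)"])  # <class 'numpy.int64'>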
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        """Load an ONNX inference session with the given provider (CPU by default)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id: Union[str, Path],
        use_auth_token: Optional[Union[bool, str, None]] = None,
        revision: Optional[Union[str, None]] = None,
        force_download: bool = False,
        cache_dir: Optional[str] = None,
        file_name: Optional[str] = None,
        provider: Optional[str] = None,
        sess_options: Optional["ort.SessionOptions"] = None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)

        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id: Union[str, Path],
        force_download: bool = True,
        use_auth_token: Optional[str] = None,
        cache_dir: Optional[str] = None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
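

# --- Illustrative usage sketch (not part of the original file) ---
# Load an ONNX model from a hypothetical Hub repo and run it on numpy inputs;
# keyword names must match the input names of the ONNX graph.
if __name__ == "__main__":
    model = OnnxRuntimeModel.from_pretrained(
        "some-org/some-onnx-model",  # hypothetical repo id
        file_name="model.onnx",
        provider="CPUExecutionProvider",
    )
    outputs = model(input_ids=np.ones((1, 77), dtype=np.int64))
    print([o.shape for o in outputs])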
| 248 | 1 |
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort, O(n log n) time.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        def _merge():
            # Repeatedly yield the smaller head; `<=` keeps the sort stable.
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 413 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        """Load an ONNX inference session with the given provider (CPU by default)."""
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)

        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
| 413 | 1 |