| column | dtype | values |
|---|---|---|
| code | string | lengths 82 – 53.2k |
| code_codestyle | int64 | 0 – 721 |
| style_context | string | lengths 91 – 41.9k |
| style_context_codestyle | int64 | 0 – 699 |
| label | int64 | 0 – 1 |
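Each row pairs a `code` string with a `style_context` string plus integer style ids and a binary label. Below is a minimal sketch of loading and inspecting rows with the `datasets` library; the dataset path is a placeholder and the label semantics are an assumption, not confirmed by this dump:

from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical hub id
row = ds[0]
print(row["code"][:80])        # beginning of the code sample
print(row["code_codestyle"])   # integer style-cluster id of the sample
print(row["label"])            # assumed: 1 if code and style_context share a style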
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
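# --- Illustrative sketch (not part of the original test file) ---
# ImageGPT's image processor color-quantizes inputs: each normalized pixel is
# replaced by the index of its nearest cluster, turning an image into a token
# sequence. A minimal numpy version of that nearest-cluster step; the helper
# name is hypothetical:
def _color_quantize_sketch(pixels, clusters):
    """pixels: (n, 3) float array; clusters: (k, 3) float array; returns (n,) cluster ids."""
    dists = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
    return dists.argmin(axis=1)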
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 325 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(self, vocab_size=246534, n_positions=256, n_embd=1280, dff=8192, n_layer=48, n_head=16,
                 resid_pdrop=0.1, embd_pdrop=0.1, layer_norm_epsilon=1e-6, initializer_range=0.02,
                 use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
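# --- Usage sketch (illustrative, not part of the module above) ---
# `attribute_map` aliases the generic config names onto CTRL's native ones, so
# both spellings read the same value:
#
#   config = CTRLConfig(n_embd=1280, n_layer=48)
#   config.hidden_size        # 1280, alias of n_embd
#   config.num_hidden_layers  # 48, alias of n_layer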
| 325 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _UpperCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase = (
{
'feature-extraction': DistilBertModel,
'fill-mask': DistilBertForMaskedLM,
'question-answering': DistilBertForQuestionAnswering,
'text-classification': DistilBertForSequenceClassification,
'token-classification': DistilBertForTokenClassification,
'zero-shot': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = True
def _UpperCAmelCase ( self ) -> List[Any]:
A = DistilBertModelTester(self )
A = ConfigTester(self , config_class=a__ , dim=37 )
def _UpperCAmelCase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*a__ )
def _UpperCAmelCase ( self ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*a__ )
def _UpperCAmelCase ( self ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*a__ )
def _UpperCAmelCase ( self ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*a__ )
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*a__ )
def _UpperCAmelCase ( self ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*a__ )
@slow
def _UpperCAmelCase ( self ) -> Any:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = DistilBertModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@slow
@require_torch_gpu
def _UpperCAmelCase ( self ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
A = True
A = model_class(config=a__ )
A = self._prepare_for_class(a__ , a__ )
A = torch.jit.trace(
a__ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a__ , os.path.join(a__ , """traced_model.pt""" ) )
A = torch.jit.load(os.path.join(a__ , """traced_model.pt""" ) , map_location=a__ )
loaded(inputs_dict["""input_ids"""].to(a__ ) , inputs_dict["""attention_mask"""].to(a__ ) )
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _UpperCAmelCase ( self ) -> List[str]:
A = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
A = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A = model(a__ , attention_mask=a__ )[0]
A = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a__ )
A = torch.tensor(
        [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) )
| 708 |
import os


def solution(filename: str = "matrix.txt") -> int:
    """
    Return the minimal path sum from the top left to the bottom right of the
    grid in `filename`, moving only right and down (Project Euler problem 81).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid)
    m = len(grid[0])
    dp = [[0 for _ in range(m)] for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, m):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, m):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
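# --- Worked example (hypothetical helper, not in the original file) ---
# The same recurrence on an in-memory grid: for [[1, 3], [2, 4]] the dp table
# fills as [[1, 4], [3, 7]], i.e. the cheapest right/down path 1 -> 2 -> 4 costs 7.
def _min_path_sum(grid):
    dp = [row[:] for row in grid]
    for i in range(len(dp)):
        for j in range(len(dp[0])):
            if i and j:
                dp[i][j] += min(dp[i - 1][j], dp[i][j - 1])
            elif i:
                dp[i][j] += dp[i - 1][j]
            elif j:
                dp[i][j] += dp[i][j - 1]
    return dp[-1][-1]

assert _min_path_sum([[1, 3], [2, 4]]) == 7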
| 546 | 0 |
from collections import defaultdict


def dfs(start: int) -> int:
    """Return the size of the subtree rooted at `start`; record roots of even-sized subtrees in `cuts`."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root; removing the recorded edges leaves only even-sized components."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 52 |
def abbr(a: str, b: str) -> bool:
    """
    Check whether `b` is an abbreviation of `a`: some lowercase letters of `a`
    may be uppercased to match `b`, and every remaining lowercase letter deleted.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 417 | 0 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32,
                 intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1,
                 initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
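# --- Usage sketch (illustrative; assumes OnnxConfig's default task handling) ---
#   onnx_config = XLMRobertaXLOnnxConfig(XLMRobertaXLConfig())
#   onnx_config.inputs
#   # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#   #              ("attention_mask", {0: "batch", 1: "sequence"})])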
| 721 |
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(self, path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
                 split: Optional[NamedSplit] = None, features: Optional[Features] = None,
                 cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False,
                 num_proc: Optional[int] = None, **kwargs):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(self, features: Optional[Features] = None, cache_dir: str = None,
                 keep_in_memory: bool = False, streaming: bool = False,
                 num_proc: Optional[int] = None, **kwargs):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
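# --- Illustrative sketch (hypothetical subclass, not part of this module) ---
# A concrete reader only has to implement read(); the base __init__ stores the
# rest. Assumes newline-delimited JSON input and a `datasets` version that
# provides Dataset.from_list:
class _JsonLinesReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        import json

        with open(self.path_or_paths, encoding="utf-8") as f:
            records = [json.loads(line) for line in f]
        return Dataset.from_list(records)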
| 262 | 0 |
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
snake_case : Dict = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def lowerCAmelCase( self : Optional[Any] ):
"""simple docstring"""
snake_case : Optional[int] = self.dummy_uncond_unet
snake_case : Dict = PNDMScheduler()
snake_case : Dict = PNDMPipeline(unet=_A , scheduler=_A )
pndm.to(_A )
pndm.set_progress_bar_config(disable=_A )
snake_case : Union[str, Any] = torch.manual_seed(0 )
snake_case : Any = pndm(generator=_A , num_inference_steps=20 , output_type='''numpy''' ).images
snake_case : Any = torch.manual_seed(0 )
snake_case : Optional[int] = pndm(generator=_A , num_inference_steps=20 , output_type='''numpy''' , return_dict=_A )[0]
snake_case : Any = image[0, -3:, -3:, -1]
snake_case : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case : List[str] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class a_ ( unittest.TestCase ):
def lowerCAmelCase( self : List[Any] ):
"""simple docstring"""
snake_case : List[Any] = """google/ddpm-cifar10-32"""
snake_case : int = UNetaDModel.from_pretrained(_A )
snake_case : Union[str, Any] = PNDMScheduler()
snake_case : List[Any] = PNDMPipeline(unet=_A , scheduler=_A )
pndm.to(_A )
pndm.set_progress_bar_config(disable=_A )
snake_case : Dict = torch.manual_seed(0 )
snake_case : Any = pndm(generator=_A , output_type='''numpy''' ).images
snake_case : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        snake_case : Optional[Any] = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 598 |
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
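# --- Illustrative note (not part of the test) ---
# With the toy merge table above, BPE applies merges in file order ("a p" has
# highest priority). "readapt" reduces r e a d a p t</w> -> r e a d ap t</w>
# -> r e a d apt</w> -> re a d apt</w> -> re ad apt</w> -> re adapt</w>,
# rendered "re@@ adapt"; "react" only matches "r e", leaving the continuation
# pieces "re@@ a@@ c@@ t" asserted by test_full_tokenizer.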
| 238 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serialize, replacing nested GenerationConfig objects with plain dicts.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
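# --- Usage sketch (argument values are illustrative) ---
# These fields extend TrainingArguments with generation-time evaluation:
#
#   args = Seq2SeqTrainingArguments(
#       output_dir="out",
#       predict_with_generate=True,   # compute metrics with .generate()
#       generation_max_length=128,
#       generation_num_beams=4,
#   )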
| 721 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features

logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    data_dir: str = field(default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    version_2_with_negative: bool = field(default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    null_score_diff_threshold: float = field(default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    n_best_size: int = field(default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer,
                 limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train,
                 is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None,
                 dataset_format: Optional[str] = "pt"):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
# Load data features from cache or dataset file
__UpperCAmelCase = '''v2''' if args.version_2_with_negative else '''v1'''
__UpperCAmelCase = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__UpperCAmelCase = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not args.overwrite_cache:
__UpperCAmelCase = time.time()
__UpperCAmelCase = torch.load(_lowercase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
__UpperCAmelCase = self.old_features['''features''']
__UpperCAmelCase = self.old_features.get('''dataset''' , _lowercase )
__UpperCAmelCase = self.old_features.get('''examples''' , _lowercase )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
''' future run''' )
else:
if mode == Split.dev:
__UpperCAmelCase = self.processor.get_dev_examples(args.data_dir )
else:
__UpperCAmelCase = self.processor.get_train_examples(args.data_dir )
__UpperCAmelCase , __UpperCAmelCase = squad_convert_examples_to_features(
examples=self.examples , tokenizer=_lowercase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_lowercase , )
__UpperCAmelCase = time.time()
torch.save(
{'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , _lowercase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Any , _lowercase : Optional[int] ):
# Convert to Tensors and build dataset
__UpperCAmelCase = self.features[i]
__UpperCAmelCase = torch.tensor(feature.input_ids , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.attention_mask , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.token_type_ids , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.cls_index , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.p_mask , dtype=torch.float )
__UpperCAmelCase = torch.tensor(feature.is_impossible , dtype=torch.float )
__UpperCAmelCase = {
'''input_ids''': input_ids,
'''attention_mask''': attention_mask,
'''token_type_ids''': token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
if self.args.version_2_with_negative:
inputs.update({'''is_impossible''': is_impossible} )
if self.is_language_sensitive:
inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
__UpperCAmelCase = torch.tensor(feature.start_position , dtype=torch.long )
__UpperCAmelCase = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )
return inputs
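# --- Usage sketch (illustrative; argument values are assumptions) ---
# The dataset plugs straight into a PyTorch DataLoader; __getitem__ returns the
# tensor dict that the model's forward() expects:
#
#   from torch.utils.data import DataLoader
#   dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True)
#   batch = next(iter(loader))  # {"input_ids": ..., "attention_mask": ..., ...}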
| 397 | 0 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class lowerCamelCase__ :
'''simple docstring'''
def __init__(self ,__lowerCamelCase = None ,__lowerCamelCase = [] ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = 0
lowerCAmelCase__ : List[str] = choices
lowerCAmelCase__ : Union[str, Any] = prompt
if sys.platform == "win32":
lowerCAmelCase__ : str = '''*'''
else:
lowerCAmelCase__ : Union[str, Any] = '''➔ '''
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = "" ) -> str:
"""simple docstring"""
if sys.platform != "win32":
writeColor(self.choices[index] ,32 ,__lowerCamelCase )
else:
forceWrite(self.choices[index] ,__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> int:
"""simple docstring"""
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(__lowerCamelCase )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = 1 ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(__lowerCamelCase )
move_cursor(__lowerCamelCase ,direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['''up'''] )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP['''down'''] )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['''newline'''] )
def lowerCAmelCase__ (self ) -> List[Any]:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position ,'''DOWN''' )
return self.position
@input.mark(KEYMAP['''interrupt'''] )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position ,'''DOWN''' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(__lowerCamelCase )] for number in range(10 )] )
def lowerCAmelCase__ (self ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = int(chr(self.current_selection ) )
lowerCAmelCase__ : List[str] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP ,-movement )
elif self.position < index:
self.move_direction(Direction.DOWN ,__lowerCamelCase )
else:
return
else:
return
def lowerCAmelCase__ (self ,__lowerCamelCase = 0 ) -> List[str]:
"""simple docstring"""
if self.prompt:
linebreak()
forceWrite(self.prompt ,'''\n''' )
if in_colab:
forceWrite('''Please input a choice index (starting from 0), and press enter''' ,'''\n''' )
else:
forceWrite('''Please select a choice using the arrow or number keys, and selecting with enter''' ,'''\n''' )
lowerCAmelCase__ : List[str] = default_choice
for i in range(len(self.choices ) ):
self.print_choice(__lowerCamelCase )
forceWrite('''\n''' )
move_cursor(len(self.choices ) - self.position ,'''UP''' )
with cursor.hide():
while True:
if in_colab:
try:
lowerCAmelCase__ : int = int(builtins.input() )
except ValueError:
lowerCAmelCase__ : List[str] = default_choice
else:
lowerCAmelCase__ : str = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 ,'''UP''' )
clear_line()
self.write_choice(__lowerCamelCase ,'''\n''' )
return choice
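# --- Usage sketch (illustrative; this class is accelerate's BulletMenu) ---
#   menu = BulletMenu("Which option?", ["choice a", "choice b"])
#   index = menu.run(default_choice=0)  # arrows/digits + enter; returns an int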
| 647 |
import requests

giphy_api_key = "YOUR API KEY"


def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of GIF URLs matching the search query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]


if __name__ == "__main__":
    print("\n".join(get_gifs("space ship")))
| 647 | 1 |
# The Korean string below is the docs' install snippet: "How to install
# Transformers / to install from source instead of the latest release,
# comment out the command above and uncomment the one below."
INSTALL_CONTENT = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 707 |
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sort `sequence[start..end]` in place with the slowsort algorithm.

    >>> seq = [1, 6, 2, 5, 3, 4, 4, 5]
    >>> slowsort(seq)
    >>> seq
    [1, 2, 3, 4, 4, 5, 5, 6]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
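# --- Usage sketch (illustrative) ---
# Slowsort is a deliberately pessimal "multiply and surrender" teaching
# algorithm; it sorts in place and is far slower than any O(n^2) sort:
_demo = [5, 2, 4, 1]
slowsort(_demo)
assert _demo == [1, 2, 4, 5]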
| 29 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {"vocab_file": "spiece.model"}
__snake_case = {
"vocab_file": {
"bert_for_seq_generation": (
"https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
),
}
}
__snake_case = {"bert_for_seq_generation": 512}
class UpperCAmelCase ( __snake_case ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = []
lowercase = ["""input_ids""", """attention_mask"""]
def __init__( self : Tuple , __magic_name__ : Tuple , __magic_name__ : Optional[int]="<s>" , __magic_name__ : Any="</s>" , __magic_name__ : List[Any]="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : Dict="<::::>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Optional[Any] , ):
"""simple docstring"""
UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , sep_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
UpperCamelCase = vocab_file
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__magic_name__ )
@property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.__dict__.copy()
UpperCamelCase = None
return state
def __setstate__( self : Union[str, Any] , __magic_name__ : Tuple ):
"""simple docstring"""
UpperCamelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
UpperCamelCase = {}
UpperCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase_ ( self : List[str] , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def lowerCamelCase_ ( self : Tuple , __magic_name__ : int ):
"""simple docstring"""
return self.sp_model.piece_to_id(__magic_name__ )
def lowerCamelCase_ ( self : Dict , __magic_name__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.sp_model.IdToPiece(__magic_name__ )
return token
def lowerCamelCase_ ( self : Any , __magic_name__ : Tuple ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__magic_name__ ) + token
UpperCamelCase = []
else:
current_sub_tokens.append(__magic_name__ )
out_string += self.sp_model.decode(__magic_name__ )
return out_string.strip()
def lowerCamelCase_ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , """wb""" ) as fi:
UpperCamelCase = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
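# --- Usage sketch (illustrative; this file implements transformers'
# BertGenerationTokenizer, a SentencePiece tokenizer) ---
#   tok = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
#   ids = tok("hello world").input_ids
#   text = tok.decode(ids)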
| 386 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
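# --- Minimal LocalSGD sketch (mirrors the training loop below) ---
# LocalSGD synchronizes model parameters only every `local_sgd_steps` optimizer
# steps instead of every step, cutting communication in data-parallel training:
#
#   with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=8, enabled=True) as local_sgd:
#       for batch in train_dataloader:
#           ...  # forward / backward / optimizer.step()
#           local_sgd.step()  # counts steps; averages parameters on every 8th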
__snake_case = 16
__snake_case = 32
def _lowercase ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int = 16 ):
"""simple docstring"""
UpperCamelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCamelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(SCREAMING_SNAKE_CASE_ : str ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCamelCase = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
UpperCamelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(SCREAMING_SNAKE_CASE_ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCamelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCamelCase = 16
elif accelerator.mixed_precision != "no":
UpperCamelCase = 8
else:
UpperCamelCase = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding="""longest""" , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
UpperCamelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
UpperCamelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def _lowercase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple ):
"""simple docstring"""
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , SCREAMING_SNAKE_CASE_ ) == "1":
UpperCamelCase = 2
# New Code #
UpperCamelCase = int(args.gradient_accumulation_steps )
UpperCamelCase = int(args.local_sgd_steps )
# Initialize accelerator
UpperCamelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=SCREAMING_SNAKE_CASE_ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase = config["""lr"""]
UpperCamelCase = int(config["""num_epochs"""] )
UpperCamelCase = int(config["""seed"""] )
UpperCamelCase = int(config["""batch_size"""] )
UpperCamelCase = evaluate.load("""glue""" , """mrpc""" )
set_seed(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCamelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCamelCase = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
UpperCamelCase = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=100 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
with LocalSGD(
accelerator=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , local_sgd_steps=SCREAMING_SNAKE_CASE_ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(SCREAMING_SNAKE_CASE_ ):
UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase = output.loss
accelerator.backward(SCREAMING_SNAKE_CASE_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCamelCase = model(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase = outputs.logits.argmax(dim=-1 )
UpperCamelCase , UpperCamelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:' , SCREAMING_SNAKE_CASE_ )
def _lowercase ( ):
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE_ , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=SCREAMING_SNAKE_CASE_ , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
UpperCamelCase = parser.parse_args()
UpperCamelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 386 | 1 |
def solution(n: int = 600851475143) -> int:
    """
    Return the largest prime factor of n (Project Euler problem 3).

    >>> solution(13195)
    29
    >>> solution(10)
    5
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
| 709 |
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase__ ,unittest.TestCase ):
"""simple docstring"""
a_ = PriorTransformer
a_ = "hidden_states"
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[int] = 4
a_ : List[str] = 8
a_ : List[str] = 7
a_ : Tuple = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
a_ : List[Any] = floats_tensor((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
a_ : List[Any] = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCAmelCase ( self , lowerCAmelCase_=0 ):
'''simple docstring'''
torch.manual_seed(lowerCAmelCase_ )
a_ : List[Any] = 4
a_ : Union[str, Any] = 8
a_ : Optional[int] = 7
a_ : List[Any] = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
a_ : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
a_ : Optional[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
return (4, 8)
@property
def _lowerCAmelCase ( self ):
'''simple docstring'''
return (4, 8)
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 4,
"""num_layers""": 2,
"""embedding_dim""": 8,
"""num_embeddings""": 7,
"""additional_embeddings""": 4,
}
a_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : Optional[Any] = PriorTransformer.from_pretrained(
"""hf-internal-testing/prior-dummy""" , output_loading_info=lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(lowerCAmelCase_ )
a_ : List[Any] = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ , a_ : Optional[int] = self.prepare_init_args_and_inputs_for_common()
a_ : str = self.model_class(**lowerCAmelCase_ )
a_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : str = [*signature.parameters.keys()]
a_ : str = ["""hidden_states""", """timestep"""]
self.assertListEqual(arg_names[:2] , lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Optional[Any] = PriorTransformer.from_pretrained("""hf-internal-testing/prior-dummy""" )
a_ : Optional[Any] = model.to(lowerCAmelCase_ )
if hasattr(lowerCAmelCase_ , """set_default_attn_processor""" ):
model.set_default_attn_processor()
a_ : Tuple = self.get_dummy_seed_input()
with torch.no_grad():
a_ : List[str] = model(**lowerCAmelCase_ )[0]
a_ : Optional[Any] = output[0, :5].flatten().cpu()
print(lowerCAmelCase_ )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
a_ : List[str] = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239] )
self.assertTrue(torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1E-2 ) )
@slow
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self , lowerCAmelCase_=1 , lowerCAmelCase_=7_68 , lowerCAmelCase_=77 , lowerCAmelCase_=0 ):
'''simple docstring'''
torch.manual_seed(lowerCAmelCase_ )
a_ : str = batch_size
a_ : Dict = embedding_dim
a_ : Tuple = num_embeddings
a_ : Optional[Any] = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
a_ : List[str] = torch.randn((batch_size, embedding_dim) ).to(lowerCAmelCase_ )
a_ : List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(lowerCAmelCase_ )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def _lowerCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
[37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
# fmt: on
] )
def _lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
a_ : Any = PriorTransformer.from_pretrained("""kandinsky-community/kandinsky-2-1-prior""" , subfolder="""prior""" )
model.to(lowerCAmelCase_ )
a_ : Optional[int] = self.get_dummy_seed_input(seed=lowerCAmelCase_ )
with torch.no_grad():
a_ : str = model(**lowerCAmelCase_ )[0]
assert list(sample.shape ) == [1, 7_68]
a_ : int = sample[0, :8].flatten().cpu()
print(lowerCAmelCase_ )
a_ : str = torch.tensor(lowerCAmelCase_ )
assert torch_all_close(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 )
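# A minimal, self-contained sketch of the seeded slice-regression recipe the tests
# above use: fix the RNG seed, build a deterministic input, then compare a small
# slice of the output against a hard-coded tensor. The model here is a stand-in
# (a plain Linear layer), not the real PriorTransformer.
def _example_slice_regression():
    import torch

    torch.manual_seed(0)
    model = torch.nn.Linear(8, 8)            # deterministic weights
    sample = torch.randn(1, 8)               # deterministic input
    with torch.no_grad():
        output = model(sample)
    output_slice = output[0, :5].flatten()
    expected_slice = output_slice.clone()    # a real test hard-codes this tensor
    assert torch.allclose(output_slice, expected_slice, rtol=1e-2)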
| 460 | 0 |
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowercase_ = 2
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , *, # begin keyword-only arguments
_a="<s>" , _a="<pad>" , _a="</s>" , _a="<unk>" , _a=None , ):
__a , __a , __a , __a = bos, unk, pad, eos
__a = []
__a = []
__a = {}
__a = self.add_symbol(_a )
__a = self.add_symbol(_a )
__a = self.add_symbol(_a )
__a = self.add_symbol(_a )
if extra_special_symbols:
for s in extra_special_symbols:
self.add_symbol(_a )
__a = len(self.symbols )
def __eq__( self , _a ):
return self.indices == other.indices
def __getitem__( self , _a ):
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
return len(self.symbols )
def __contains__( self , _a ):
return sym in self.indices
@classmethod
def __UpperCAmelCase ( cls , _a ):
__a = cls()
d.add_from_file(_a )
return d
def __UpperCAmelCase ( self , _a , _a=1 , _a=False ):
if word in self.indices and not overwrite:
__a = self.indices[word]
__a = self.count[idx] + n
return idx
else:
__a = len(self.symbols )
__a = idx
self.symbols.append(_a )
self.count.append(_a )
return idx
def __UpperCAmelCase ( self , _a ):
return 0
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , _a ):
try:
with open(_a , '''r''' , encoding='''utf-8''' ) as fd:
self.add_from_file(_a )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(_a ) )
return
        __a = _a.readlines()  # _a is the already-open file object here (the dangling name "f" was its pre-rename)
__a = self._load_meta(_a )
for line in lines[indices_start_line:]:
try:
__a , __a = line.rstrip().rsplit(''' ''' , 1 )
if field == "#fairseq:overwrite":
__a = True
__a , __a = line.rsplit(''' ''' , 1 )
else:
__a = False
__a = int(_a )
__a = line
if word in self and not overwrite:
raise RuntimeError(
'''Duplicate word found when loading Dictionary: \'{}\'. '''
'''Duplicate words can overwrite earlier ones by adding the '''
'''#fairseq:overwrite flag at the end of the corresponding row '''
'''in the dictionary file. If using the Camembert model, please '''
'''download an updated copy of the model file.'''.format(_a ) )
self.add_symbol(_a , n=_a , overwrite=_a )
except ValueError:
raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' )
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> str:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
__a = dict((re.sub(r'''@@$''' , '''''' , lowerCAmelCase__ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , lowerCAmelCase__ ), v) for k, v in d.items() )
__a = '''<s> <pad> </s> <unk>'''.split()
# restore the special tokens
for k in keep_keys:
del da[f'''{k}</w>''']
        da[k] = d[k]  # restore the special token under its bare key
return da
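# A self-contained check of the rewrite rule in the comment above: BPE continuation
# tokens lose their "@@" marker while word-final tokens gain a "</w>" suffix.
def _example_rewrite_rule():
    d = {"le@@": 5, "tt@@": 6, "er": 7}
    rewritten = {
        (re.sub(r"@@$", "", k) if k.endswith("@@") else k + "</w>"): v
        for k, v in d.items()
    }
    assert rewritten == {"le": 5, "tt": 6, "er</w>": 7}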
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> Optional[int]:
# prep
if not os.path.exists(lowerCAmelCase__ ):
raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
__a = os.path.join(lowerCAmelCase__ , '''checkpoint.pt''' )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
__a = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
__a = chkpt['''cfg''']['''model''']
# dicts
__a = os.path.join(lowerCAmelCase__ , '''dict.txt''' )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'''path to the file {dict_file} does not exist!''' )
__a = Dictionary.load(lowerCAmelCase__ )
__a = rewrite_dict_keys(src_dict.indices )
__a = len(lowerCAmelCase__ )
__a = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# merges_file (bpecodes)
__a = os.path.join(lowerCAmelCase__ , '''bpecodes''' )
if not os.path.isfile(lowerCAmelCase__ ):
raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
__a = os.path.join(lowerCAmelCase__ , VOCAB_FILES_NAMES['''merges_file'''] )
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
# model config
__a = os.path.join(lowerCAmelCase__ , '''config.json''' )
__a = {
'''activation_dropout''': args['''activation_dropout'''],
'''architectures''': ['''BioGptForCausalLM'''],
'''attention_probs_dropout_prob''': args['''attention_dropout'''],
'''bos_token_id''': 0,
'''eos_token_id''': 2,
'''hidden_act''': args['''activation_fn'''],
'''hidden_dropout_prob''': args['''dropout'''],
'''hidden_size''': args['''decoder_embed_dim'''],
'''initializer_range''': 0.02,
'''intermediate_size''': args['''decoder_ffn_embed_dim'''],
'''layer_norm_eps''': 1e-1_2,
'''layerdrop''': args['''decoder_layerdrop'''],
'''max_position_embeddings''': args['''max_target_positions'''],
'''model_type''': '''biogpt''',
'''num_attention_heads''': args['''decoder_attention_heads'''],
'''num_hidden_layers''': args['''decoder_layers'''],
'''pad_token_id''': 1,
'''scale_embedding''': not args['''no_scale_embedding'''],
'''tie_word_embeddings''': args['''share_decoder_input_output_embed'''],
'''vocab_size''': src_vocab_size,
}
# good hparam defaults to start with
print(f'''Generating {biogpt_model_config_file}''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# tokenizer config
__a = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
__a = {
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
'''model_max_length''': 1024,
'''pad_token''': '''<pad>''',
'''special_tokens_map_file''': None,
'''tokenizer_class''': '''BioGptTokenizer''',
'''unk_token''': '''<unk>''',
}
print(f'''Generating {biogpt_tokenizer_config_file}''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ , indent=lowerCAmelCase__ ) )
# model
__a = chkpt['''model''']
# remove unneeded keys
__a = [
'''decoder.version''',
]
for k in ignore_keys:
model_state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
__a = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('''output_projection.weight''' ):
__a = model_state_dict.pop(lowerCAmelCase__ )
else:
__a = model_state_dict.pop(lowerCAmelCase__ )
__a = BioGptConfig.from_pretrained(lowerCAmelCase__ )
__a = BioGptForCausalLM(lowerCAmelCase__ )
# check that it loads ok
model_new.load_state_dict(lowerCAmelCase__ )
# save
__a = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print('''Conversion is done!''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--biogpt_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
" bpecodes, etc."
),
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
lowercase_ = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
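# After conversion, the dump folder loads like any Hugging Face checkpoint. A minimal
# sketch (the folder path is hypothetical, and BioGptTokenizer needs sacremoses):
def _example_load_converted(folder="./biogpt-converted"):
    from transformers import BioGptForCausalLM, BioGptTokenizer

    tokenizer = BioGptTokenizer.from_pretrained(folder)
    model = BioGptForCausalLM.from_pretrained(folder)
    inputs = tokenizer("COVID-19 is", return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=20)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))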
| 695 |
"""simple docstring"""
from __future__ import annotations
import requests
def lowercase ( lowerCAmelCase__ : str ) -> dict:
__a = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(lowerCAmelCase__ ).json()
def lowercase ( lowerCAmelCase__ : int = 10 ) -> list[dict]:
__a = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
__a = requests.get(lowerCAmelCase__ ).json()[:max_stories]
return [get_hackernews_story(lowerCAmelCase__ ) for story_id in story_ids]
def lowercase ( lowerCAmelCase__ : int = 10 ) -> str:
__a = hackernews_top_stories(lowerCAmelCase__ )
return "\n".join('''* [{title}]({url})'''.format(**lowerCAmelCase__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
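# The helpers above call requests.get() without a timeout or status check. A hardened
# variant of the story fetcher (a sketch, not the module's implementation):
def get_hackernews_story_safe(story_id: str, timeout: float = 10.0) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    response = requests.get(url, timeout=timeout)
    response.raise_for_status()  # surface HTTP errors instead of parsing error bodies
    return response.json()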
| 695 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def A_ ( lowercase_ , lowercase_ , lowercase_ = 1 / sqrt(2 ) ) ->IIRFilter:
"""simple docstring"""
SCREAMING_SNAKE_CASE = tau * frequency / samplerate
SCREAMING_SNAKE_CASE = sin(lowercase_ )
SCREAMING_SNAKE_CASE = cos(lowercase_ )
SCREAMING_SNAKE_CASE = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE = (1 - _cos) / 2
SCREAMING_SNAKE_CASE = 1 - _cos
SCREAMING_SNAKE_CASE = 1 + alpha
SCREAMING_SNAKE_CASE = -2 * _cos
SCREAMING_SNAKE_CASE = 1 - alpha
SCREAMING_SNAKE_CASE = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A_ ( lowercase_ , lowercase_ , lowercase_ = 1 / sqrt(2 ) ) ->IIRFilter:
"""simple docstring"""
SCREAMING_SNAKE_CASE = tau * frequency / samplerate
SCREAMING_SNAKE_CASE = sin(lowercase_ )
SCREAMING_SNAKE_CASE = cos(lowercase_ )
SCREAMING_SNAKE_CASE = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE = (1 + _cos) / 2
SCREAMING_SNAKE_CASE = -1 - _cos
SCREAMING_SNAKE_CASE = 1 + alpha
SCREAMING_SNAKE_CASE = -2 * _cos
SCREAMING_SNAKE_CASE = 1 - alpha
SCREAMING_SNAKE_CASE = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A_ ( lowercase_ , lowercase_ , lowercase_ = 1 / sqrt(2 ) ) ->IIRFilter:
"""simple docstring"""
SCREAMING_SNAKE_CASE = tau * frequency / samplerate
SCREAMING_SNAKE_CASE = sin(lowercase_ )
SCREAMING_SNAKE_CASE = cos(lowercase_ )
SCREAMING_SNAKE_CASE = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE = _sin / 2
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = -ba
SCREAMING_SNAKE_CASE = 1 + alpha
SCREAMING_SNAKE_CASE = -2 * _cos
SCREAMING_SNAKE_CASE = 1 - alpha
SCREAMING_SNAKE_CASE = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A_ ( lowercase_ , lowercase_ , lowercase_ = 1 / sqrt(2 ) ) ->IIRFilter:
"""simple docstring"""
SCREAMING_SNAKE_CASE = tau * frequency / samplerate
SCREAMING_SNAKE_CASE = sin(lowercase_ )
SCREAMING_SNAKE_CASE = cos(lowercase_ )
SCREAMING_SNAKE_CASE = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE = 1 - alpha
SCREAMING_SNAKE_CASE = -2 * _cos
SCREAMING_SNAKE_CASE = 1 + alpha
SCREAMING_SNAKE_CASE = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def A_ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1 / sqrt(2 ) , ) ->IIRFilter:
"""simple docstring"""
SCREAMING_SNAKE_CASE = tau * frequency / samplerate
SCREAMING_SNAKE_CASE = sin(lowercase_ )
SCREAMING_SNAKE_CASE = cos(lowercase_ )
SCREAMING_SNAKE_CASE = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE = 1_0 ** (gain_db / 4_0)
SCREAMING_SNAKE_CASE = 1 + alpha * big_a
SCREAMING_SNAKE_CASE = -2 * _cos
SCREAMING_SNAKE_CASE = 1 - alpha * big_a
SCREAMING_SNAKE_CASE = 1 + alpha / big_a
SCREAMING_SNAKE_CASE = -2 * _cos
SCREAMING_SNAKE_CASE = 1 - alpha / big_a
SCREAMING_SNAKE_CASE = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A_ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1 / sqrt(2 ) , ) ->IIRFilter:
"""simple docstring"""
SCREAMING_SNAKE_CASE = tau * frequency / samplerate
SCREAMING_SNAKE_CASE = sin(lowercase_ )
SCREAMING_SNAKE_CASE = cos(lowercase_ )
SCREAMING_SNAKE_CASE = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE = 1_0 ** (gain_db / 4_0)
SCREAMING_SNAKE_CASE = (big_a + 1) - (big_a - 1) * _cos
SCREAMING_SNAKE_CASE = (big_a + 1) + (big_a - 1) * _cos
SCREAMING_SNAKE_CASE = (big_a - 1) - (big_a + 1) * _cos
SCREAMING_SNAKE_CASE = (big_a - 1) + (big_a + 1) * _cos
SCREAMING_SNAKE_CASE = 2 * sqrt(lowercase_ ) * alpha
SCREAMING_SNAKE_CASE = big_a * (pmc + aaa)
SCREAMING_SNAKE_CASE = 2 * big_a * mpc
SCREAMING_SNAKE_CASE = big_a * (pmc - aaa)
SCREAMING_SNAKE_CASE = ppmc + aaa
SCREAMING_SNAKE_CASE = -2 * pmpc
SCREAMING_SNAKE_CASE = ppmc - aaa
SCREAMING_SNAKE_CASE = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A_ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ = 1 / sqrt(2 ) , ) ->IIRFilter:
"""simple docstring"""
SCREAMING_SNAKE_CASE = tau * frequency / samplerate
SCREAMING_SNAKE_CASE = sin(lowercase_ )
SCREAMING_SNAKE_CASE = cos(lowercase_ )
SCREAMING_SNAKE_CASE = _sin / (2 * q_factor)
SCREAMING_SNAKE_CASE = 1_0 ** (gain_db / 4_0)
SCREAMING_SNAKE_CASE = (big_a + 1) - (big_a - 1) * _cos
SCREAMING_SNAKE_CASE = (big_a + 1) + (big_a - 1) * _cos
SCREAMING_SNAKE_CASE = (big_a - 1) - (big_a + 1) * _cos
SCREAMING_SNAKE_CASE = (big_a - 1) + (big_a + 1) * _cos
SCREAMING_SNAKE_CASE = 2 * sqrt(lowercase_ ) * alpha
SCREAMING_SNAKE_CASE = big_a * (ppmc + aaa)
SCREAMING_SNAKE_CASE = -2 * big_a * pmpc
SCREAMING_SNAKE_CASE = big_a * (ppmc - aaa)
SCREAMING_SNAKE_CASE = pmc + aaa
SCREAMING_SNAKE_CASE = 2 * mpc
SCREAMING_SNAKE_CASE = pmc - aaa
SCREAMING_SNAKE_CASE = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
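# A minimal usage sketch: low-pass a 5 kHz tone through a 1 kHz biquad at 48 kHz.
# The dump renames every factory above to A_ (so the last def shadows the rest);
# the factory is therefore passed in explicitly. Upstream these are make_lowpass,
# make_highpass, etc., and IIRFilter exposes a per-sample process() method.
def _example_filter_usage(make_filter, frequency: int = 1_000, samplerate: int = 48_000) -> float:
    filt = make_filter(frequency, samplerate)
    tone = (sin(tau * 5_000 * n / samplerate) for n in range(256))
    return max(abs(filt.process(sample)) for sample in tone)  # attenuated peak level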
| 700 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
"Visual-Attention-Network/van-base": (
"https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
),
}
class a_( lowercase__ ):
"""simple docstring"""
__snake_case : int ='''van'''
def __init__( self : Tuple , lowerCAmelCase__ : Optional[int]=2_2_4 , lowerCAmelCase__ : Optional[Any]=3 , lowerCAmelCase__ : Tuple=[7, 3, 3, 3] , lowerCAmelCase__ : Any=[4, 2, 2, 2] , lowerCAmelCase__ : Optional[int]=[6_4, 1_2_8, 3_2_0, 5_1_2] , lowerCAmelCase__ : Optional[Any]=[3, 3, 1_2, 3] , lowerCAmelCase__ : str=[8, 8, 4, 4] , lowerCAmelCase__ : List[str]="gelu" , lowerCAmelCase__ : Union[str, Any]=0.02 , lowerCAmelCase__ : Any=1e-6 , lowerCAmelCase__ : Optional[Any]=1e-2 , lowerCAmelCase__ : Union[str, Any]=0.0 , lowerCAmelCase__ : Any=0.0 , **lowerCAmelCase__ : Tuple , ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE = image_size
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = layer_scale_init_value
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = dropout_rate
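if __name__ == "__main__":
    # A minimal sketch of this config. It mirrors transformers' VanConfig (the dump
    # mangles the base-class name and the self.* assignments, so the real class is
    # used here to keep the sketch runnable):
    from transformers import VanConfig

    config = VanConfig()
    print(config.model_type)    # "van"
    print(config.hidden_sizes)  # [64, 128, 320, 512]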
| 259 | 0 |
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def A_ ( _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = tmp_path / "file.csv"
_lowerCamelCase : Optional[int] = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Any = tmp_path / "malformed_file.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n header1,header2\n 1,2\n 10,20,\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : int = tmp_path / "csv_with_image.csv"
_lowerCamelCase : int = textwrap.dedent(
        F'\n image\n {image_file}\n ' )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : List[str] ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_label.csv"
_lowerCamelCase : int = textwrap.dedent(
"\\n label\n good\n bad\n good\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
@pytest.fixture
def A_ ( _lowerCAmelCase : Dict ):
"""simple docstring"""
_lowerCamelCase : Dict = tmp_path / "csv_with_int_list.csv"
_lowerCamelCase : Any = textwrap.dedent(
"\\n int_list\n 1 2 3\n 4 5 6\n 7 8 9\n " )
with open(_lowerCAmelCase , "w" ) as f:
f.write(_lowerCAmelCase )
return str(_lowerCAmelCase )
def A_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : List[Any] = Csv()
_lowerCamelCase : Any = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_lowerCAmelCase , match="Error tokenizing data" ):
for _ in generator:
pass
assert any(
record.levelname == "ERROR"
and "Failed to read file" in record.message
and os.path.basename(_lowerCAmelCase ) in record.message
for record in caplog.records )
@require_pil
def A_ ( _lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : Any = f.read().splitlines()[1]
_lowerCamelCase : Optional[Any] = Csv(encoding="utf-8" , features=Features({"image": Image()} ) )
_lowerCamelCase : Union[str, Any] = csv._generate_tables([[csv_file_with_image]] )
_lowerCamelCase : List[str] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("image" ).type == Image()()
_lowerCamelCase : int = pa_table.to_pydict()["image"]
assert generated_content == [{"path": image_file, "bytes": None}]
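# A self-contained sketch of the pattern the tests above rely on: write a small CSV,
# stream it through the packaged Csv builder, and concatenate the yielded Arrow
# tables (tempfile stands in for the pytest tmp_path fixture; names are illustrative).
def _example_generate_tables():
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        path = os.path.join(tmp_dir, "file.csv")
        with open(path, "w") as f:
            f.write("header1,header2\n1,2\n10,20\n")
        generator = Csv()._generate_tables([[path]])
        pa_table = pa.concat_tables([table for _, table in generator])
        assert pa_table.to_pydict() == {"header1": [1, 10], "header2": [2, 20]}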
def A_ ( _lowerCAmelCase : List[Any] ):
"""simple docstring"""
with open(_lowerCAmelCase , encoding="utf-8" ) as f:
_lowerCamelCase : List[Any] = f.read().splitlines()[1:]
_lowerCamelCase : int = Csv(encoding="utf-8" , features=Features({"label": ClassLabel(names=["good", "bad"] )} ) )
_lowerCamelCase : Tuple = csv._generate_tables([[csv_file_with_label]] )
_lowerCamelCase : int = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field("label" ).type == ClassLabel(names=["good", "bad"] )()
_lowerCamelCase : Union[str, Any] = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"] ).str2int(label ) for label in labels]
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
    _lowerCamelCase : Dict = Csv(encoding="utf-8" , sep="," , converters={"int_list": lambda x: [int(i ) for i in x.split()]} )
_lowerCamelCase : List[Any] = csv._generate_tables([[csv_file_with_int_list]] )
_lowerCamelCase : Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field("int_list" ).type )
_lowerCamelCase : Optional[Any] = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowerCamelCase : Union[str, Any] = get_tests_dir('''fixtures''')
class lowercase ( unittest.TestCase ):
def __snake_case( self : str ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = mock.Mock()
SCREAMING_SNAKE_CASE = 500
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = HTTPError
SCREAMING_SNAKE_CASE = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=_UpperCamelCase ) as mock_head:
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" )
# This check we did call the fake head request
mock_head.assert_called()
def __snake_case( self : List[str] ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" )
@is_staging_test
class lowercase ( unittest.TestCase ):
@classmethod
def __snake_case( cls : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = TOKEN
HfFolder.save_token(_UpperCamelCase )
@classmethod
def __snake_case( cls : Dict ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-feature-extractor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" )
except HTTPError:
pass
def __snake_case( self : Any ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id="test-feature-extractor" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(F"{USER}/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __snake_case( self : Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_UpperCamelCase , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=_UpperCamelCase , use_auth_token=self._token )
SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_UpperCamelCase , getattr(_UpperCamelCase , _UpperCamelCase ) )
def __snake_case( self : List[str] ) -> Tuple:
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
SCREAMING_SNAKE_CASE = CustomFeatureExtractor.from_pretrained(_UpperCamelCase )
feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , )
SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(
F"{USER}/test-dynamic-feature-extractor" , trust_remote_code=_UpperCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
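# A minimal sketch of the offline-robustness pattern in the first test of this file:
# stub requests.Session.request with a 500 response and check that cached loading
# still works. These attribute names are the shape the test's mangled assignments
# above encode (status_code, headers, raise_for_status, json).
def _example_offline_mock():
    response_mock = mock.Mock()
    response_mock.status_code = 500
    response_mock.headers = {}
    response_mock.raise_for_status.side_effect = HTTPError
    response_mock.json.return_value = {}
    with mock.patch("requests.Session.request", return_value=response_mock):
        pass  # a from_pretrained(...) call here must resolve from the local cache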
| 403 | 0 |
'''simple docstring'''
import requests
snake_case_ = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def _lowerCamelCase( UpperCamelCase__ : str ) -> None:
# fetching a list of articles in json format
A : Any = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
| 537 |
'''simple docstring'''
import requests
snake_case_ = """YOUR API KEY"""
def _lowerCamelCase( UpperCamelCase__ : str , UpperCamelCase__ : str = giphy_api_key ) -> list:
A : Optional[Any] = '''+'''.join(query.split() )
A : List[Any] = F'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
A : int = requests.get(UpperCamelCase__ ).json()['''data''']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 537 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''beit'''
def __init__( self : Optional[Any] , UpperCAmelCase : Dict=8192 , UpperCAmelCase : int=768 , UpperCAmelCase : int=12 , UpperCAmelCase : List[str]=12 , UpperCAmelCase : int=3072 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : List[Any]=0.0 , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : str=1e-12 , UpperCAmelCase : Dict=224 , UpperCAmelCase : Dict=16 , UpperCAmelCase : int=3 , UpperCAmelCase : int=False , UpperCAmelCase : Tuple=False , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Union[str, Any]=[3, 5, 7, 11] , UpperCAmelCase : List[str]=[1, 2, 3, 6] , UpperCAmelCase : Dict=True , UpperCAmelCase : int=0.4 , UpperCAmelCase : int=256 , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : str=False , UpperCAmelCase : int=255 , **UpperCAmelCase : Dict , ) -> int:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Any =vocab_size
lowercase : int =hidden_size
lowercase : int =num_hidden_layers
lowercase : List[Any] =num_attention_heads
lowercase : Optional[int] =intermediate_size
lowercase : Any =hidden_act
lowercase : Optional[int] =hidden_dropout_prob
lowercase : List[Any] =attention_probs_dropout_prob
lowercase : Union[str, Any] =initializer_range
lowercase : List[str] =layer_norm_eps
lowercase : str =image_size
lowercase : List[str] =patch_size
lowercase : List[str] =num_channels
lowercase : int =use_mask_token
lowercase : Dict =use_absolute_position_embeddings
lowercase : Optional[int] =use_relative_position_bias
lowercase : List[str] =use_shared_relative_position_bias
lowercase : List[str] =layer_scale_init_value
lowercase : Any =drop_path_rate
lowercase : Tuple =use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase : Optional[Any] =out_indices
lowercase : Dict =pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase : List[Any] =use_auxiliary_head
lowercase : str =auxiliary_loss_weight
lowercase : Optional[int] =auxiliary_channels
lowercase : Union[str, Any] =auxiliary_num_convs
lowercase : Tuple =auxiliary_concat_input
lowercase : Dict =semantic_loss_ignore_index
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = version.parse('''1.11''' )
@property
def A__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A__ ( self : Dict ) -> float:
'''simple docstring'''
return 1e-4
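if __name__ == "__main__":
    # The two classes above mirror transformers' BeitConfig and BeitOnnxConfig (the
    # dump mangles the base-class names, so the real ones are used to run this). The
    # ONNX config reports the dynamic axes of pixel_values and the tolerance above:
    from transformers import BeitConfig
    from transformers.models.beit.configuration_beit import BeitOnnxConfig

    onnx_config = BeitOnnxConfig(BeitConfig())
    print(dict(onnx_config.inputs))         # {'pixel_values': {0: 'batch', ...}}
    print(onnx_config.atol_for_validation)  # 1e-4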
| 94 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( __A : Tuple ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(__A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__A , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__A ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''shortest_edge''': 224}
lowercase : str =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowercase : List[str] =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : List[Any] =get_size_dict(UpperCAmelCase , param_name='''crop_size''' )
lowercase : Tuple =do_resize
lowercase : Any =size
lowercase : Optional[Any] =do_center_crop
lowercase : str =crop_size
lowercase : Any =resample
lowercase : List[Any] =do_rescale
lowercase : Dict =rescale_factor
lowercase : List[str] =do_normalize
lowercase : Optional[int] =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : List[Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[str] , ) -> np.ndarray:
'''simple docstring'''
lowercase : str =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" in size:
lowercase : int =get_resize_output_image_size(UpperCAmelCase , size['''shortest_edge'''] , default_to_square=UpperCAmelCase )
elif "height" in size and "width" in size:
lowercase : str =(size['''height'''], size['''width'''])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : List[str] =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Tuple , ) -> List[Any]:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Dict =to_numpy_array(UpperCAmelCase )
if do_resize:
lowercase : Union[str, Any] =self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase )
if do_center_crop:
lowercase : Optional[Any] =self.center_crop(UpperCAmelCase , size=UpperCAmelCase )
if do_rescale:
lowercase : Dict =self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase )
if do_normalize:
lowercase : int =self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase )
lowercase : Optional[Any] =to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase )
return image
def A__ ( self : List[str] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Optional[int] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Any =do_resize if do_resize is not None else self.do_resize
lowercase : Union[str, Any] =resample if resample is not None else self.resample
lowercase : Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : str =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[str] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Optional[Any] =size if size is not None else self.size
lowercase : Any =get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowercase : Union[str, Any] =crop_size if crop_size is not None else self.crop_size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase , param_name='''crop_size''' )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : List[str] =make_batched(UpperCAmelCase )
lowercase : Union[str, Any] =[
[
self._preprocess_image(
image=UpperCAmelCase , do_resize=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , do_center_crop=UpperCAmelCase , crop_size=UpperCAmelCase , do_rescale=UpperCAmelCase , rescale_factor=UpperCAmelCase , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , data_format=UpperCAmelCase , )
for img in video
]
for video in videos
]
lowercase : Dict ={'''pixel_values''': videos}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
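if __name__ == "__main__":
    # A minimal usage sketch. The class above mirrors transformers'
    # VideoMAEImageProcessor (the dump flattens its self.* assignments into throwaway
    # locals, so the real class is used here): one two-frame video in, a batched
    # (batch, frames, channels, height, width) tensor out.
    from transformers import VideoMAEImageProcessor

    processor = VideoMAEImageProcessor()
    video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(2)]
    batch = processor(video, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 2, 3, 224, 224)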
| 94 | 1 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowerCAmelCase_ : Tuple = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowerCAmelCase_ : Dict = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowerCAmelCase_ : Union[str, Any] = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
lowerCAmelCase_ : Any = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
lowerCAmelCase_ : Dict = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
lowerCAmelCase_ : List[Any] = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(64, 64)
)
lowerCAmelCase_ : Any = tf.keras.preprocessing.image.img_to_array(test_image)
lowerCAmelCase_ : Tuple = np.expand_dims(test_image, axis=0)
lowerCAmelCase_ : Any = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
lowerCAmelCase_ : List[Any] = 'Normal'
if result[0][0] == 1:
lowerCAmelCase_ : str = 'Abnormality detected'
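# The layer names above follow this dump's digit-to-'a' renaming (ConvaD == Conv2D,
# MaxPoolingaD == MaxPooling2D). For reference, the same architecture with standard
# Keras names:
def build_classifier():
    model = models.Sequential(
        [
            layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'),
            layers.MaxPooling2D(pool_size=(2, 2)),
            layers.Conv2D(32, (3, 3), activation='relu'),
            layers.MaxPooling2D(pool_size=(2, 2)),
            layers.Flatten(),
            layers.Dense(units=128, activation='relu'),
            layers.Dense(units=1, activation='sigmoid'),
        ]
    )
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    return model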
| 464 |
'''simple docstring'''
import string
from math import logaa
def UpperCAmelCase ( A : str , A : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
SCREAMING_SNAKE_CASE : Tuple = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def UpperCAmelCase ( A : str , A : str ):
SCREAMING_SNAKE_CASE : int = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
SCREAMING_SNAKE_CASE : Any = corpus_without_punctuation.split('''\n''' )
SCREAMING_SNAKE_CASE : Optional[Any] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(A ))
def UpperCAmelCase ( A : int , A : int , A : str=False ):
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def UpperCAmelCase ( A : int , A : int ):
return round(tf * idf , 3 )
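if __name__ == "__main__":
    # A worked example tying the three helpers together (logaa above is this dump's
    # renaming of math.log10): term frequency, inverse document frequency, product.
    from math import log10

    tf = 3                          # the term occurs 3 times in one document
    df, n = 2, 10                   # and appears in 2 of the 10 corpus documents
    idf = round(log10(n / df), 3)   # log10(10 / 2) = 0.699
    print(round(tf * idf, 3))       # 3 * 0.699 = 2.097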
| 464 | 1 |
'''simple docstring'''
import os
def _UpperCamelCase ()-> str:
'''simple docstring'''
__snake_case = os.path.join(os.path.dirname(_lowerCamelCase ) , '''num.txt''' )
with open(_lowerCamelCase ) as file_hand:
return str(sum(int(_lowerCamelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 24 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a ( __lowercase ):
SCREAMING_SNAKE_CASE__ : jnp.ndarray
@flax_register_to_config
class a ( nn.Module ,__lowercase ,__lowercase ):
SCREAMING_SNAKE_CASE__ : int = 32
SCREAMING_SNAKE_CASE__ : int = 4
SCREAMING_SNAKE_CASE__ : int = 4
SCREAMING_SNAKE_CASE__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
SCREAMING_SNAKE_CASE__ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
SCREAMING_SNAKE_CASE__ : Union[bool, Tuple[bool]] = False
SCREAMING_SNAKE_CASE__ : Tuple[int] = (320, 640, 1280, 1280)
SCREAMING_SNAKE_CASE__ : int = 2
SCREAMING_SNAKE_CASE__ : Union[int, Tuple[int]] = 8
SCREAMING_SNAKE_CASE__ : Optional[Union[int, Tuple[int]]] = None
SCREAMING_SNAKE_CASE__ : int = 1280
SCREAMING_SNAKE_CASE__ : float = 0.0
SCREAMING_SNAKE_CASE__ : bool = False
SCREAMING_SNAKE_CASE__ : jnp.dtype = jnp.floataa
SCREAMING_SNAKE_CASE__ : bool = True
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : bool = False
def snake_case_ ( self , _lowerCAmelCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = (1, self.in_channels, self.sample_size, self.sample_size)
__SCREAMING_SNAKE_CASE: Tuple = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE: Optional[Any] = jnp.ones((1,) , dtype=jnp.intaa )
__SCREAMING_SNAKE_CASE: Optional[int] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[int] = jax.random.split(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["params"]
def snake_case_ ( self ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE: int = self.block_out_channels
__SCREAMING_SNAKE_CASE: Union[str, Any] = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__SCREAMING_SNAKE_CASE: Any = self.num_attention_heads or self.attention_head_dim
# input
__SCREAMING_SNAKE_CASE: str = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__SCREAMING_SNAKE_CASE: int = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__SCREAMING_SNAKE_CASE: Union[str, Any] = FlaxTimestepEmbedding(_lowerCAmelCase , dtype=self.dtype )
__SCREAMING_SNAKE_CASE: Optional[int] = self.only_cross_attention
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Union[str, Any] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Any = (num_attention_heads,) * len(self.down_block_types )
# down
__SCREAMING_SNAKE_CASE: Union[str, Any] = []
__SCREAMING_SNAKE_CASE: List[str] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__SCREAMING_SNAKE_CASE: List[str] = output_channel
__SCREAMING_SNAKE_CASE: str = block_out_channels[i]
__SCREAMING_SNAKE_CASE: Any = i == len(_lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__SCREAMING_SNAKE_CASE: str = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__SCREAMING_SNAKE_CASE: Tuple = FlaxDownBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: str = down_blocks
# mid
__SCREAMING_SNAKE_CASE: Union[str, Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__SCREAMING_SNAKE_CASE: Optional[int] = []
__SCREAMING_SNAKE_CASE: Tuple = list(reversed(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Optional[int] = list(reversed(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: str = list(reversed(_lowerCAmelCase ) )
__SCREAMING_SNAKE_CASE: Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__SCREAMING_SNAKE_CASE: int = output_channel
__SCREAMING_SNAKE_CASE: List[str] = reversed_block_out_channels[i]
__SCREAMING_SNAKE_CASE: List[str] = reversed_block_out_channels[min(i + 1 , len(_lowerCAmelCase ) - 1 )]
__SCREAMING_SNAKE_CASE: Union[str, Any] = i == len(_lowerCAmelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__SCREAMING_SNAKE_CASE: Optional[int] = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , prev_output_channel=_lowerCAmelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__SCREAMING_SNAKE_CASE: int = FlaxUpBlockaD(
in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , prev_output_channel=_lowerCAmelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Any = output_channel
__SCREAMING_SNAKE_CASE: Union[str, Any] = up_blocks
# out
__SCREAMING_SNAKE_CASE: Optional[int] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__SCREAMING_SNAKE_CASE: Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase = True , _lowerCAmelCase = False , ):
"""simple docstring"""
if not isinstance(_lowerCAmelCase , jnp.ndarray ):
__SCREAMING_SNAKE_CASE: Dict = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE: Optional[Any] = timesteps.astype(dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE: Union[str, Any] = jnp.expand_dims(_lowerCAmelCase , 0 )
__SCREAMING_SNAKE_CASE: Any = self.time_proj(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Union[str, Any] = self.time_embedding(_lowerCAmelCase )
# 2. pre-process
__SCREAMING_SNAKE_CASE: int = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) )
__SCREAMING_SNAKE_CASE: List[str] = self.conv_in(_lowerCAmelCase )
# 3. down
__SCREAMING_SNAKE_CASE: Dict = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Optional[Any] = down_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
else:
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE: Tuple = down_block(_lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__SCREAMING_SNAKE_CASE: Union[str, Any] = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCAmelCase , _lowerCAmelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__SCREAMING_SNAKE_CASE: Union[str, Any] = new_down_block_res_samples
# 4. mid
__SCREAMING_SNAKE_CASE: Dict = self.mid_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__SCREAMING_SNAKE_CASE: Union[str, Any] = down_block_res_samples[-(self.layers_per_block + 1) :]
__SCREAMING_SNAKE_CASE: Union[str, Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE: Any = up_block(
_lowerCAmelCase , temb=_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , res_hidden_states_tuple=_lowerCAmelCase , deterministic=not train , )
else:
__SCREAMING_SNAKE_CASE: List[str] = up_block(_lowerCAmelCase , temb=_lowerCAmelCase , res_hidden_states_tuple=_lowerCAmelCase , deterministic=not train )
# 6. post-process
__SCREAMING_SNAKE_CASE: Optional[Any] = self.conv_norm_out(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Optional[Any] = nn.silu(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: Tuple = self.conv_out(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE: int = jnp.transpose(_lowerCAmelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCAmelCase )
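if __name__ == "__main__":
    # A minimal sketch of driving this model. Upstream it is diffusers'
    # FlaxUNet2DConditionModel with an init_weights() method (the dump's digit-to-'a'
    # renaming and method shadowing make the class above unrunnable as written, so
    # the real class is used). Tiny channel sizes keep CPU initialization fast.
    from diffusers import FlaxUNet2DConditionModel

    unet = FlaxUNet2DConditionModel(
        sample_size=16,
        block_out_channels=(32, 32, 32, 32),
        layers_per_block=1,
        cross_attention_dim=32,
        attention_head_dim=8,
    )
    params = unet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 16, 16), dtype=jnp.float32)  # NCHW latents
    encoder_hidden_states = jnp.zeros((1, 1, 32), dtype=jnp.float32)
    out = unet.apply({"params": params}, sample, jnp.array([1]), encoder_hidden_states)
    print(out.sample.shape)  # (1, 4, 16, 16)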
| 202 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCAmelCase__ : Tuple = 1024
lowerCAmelCase__ : Optional[Any] = 4096
lowerCAmelCase__ : Tuple = 24
lowerCAmelCase__ : Optional[Any] = 16
lowerCAmelCase__ : Tuple = [5, 11, 17, 23]
lowerCAmelCase__ : Dict = [256, 512, 1024, 1024]
lowerCAmelCase__ : Any = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCAmelCase__ : Any = 768
lowerCAmelCase__ : List[Any] = [1, 1, 1, 0.5]
lowerCAmelCase__ : int = [256, 512, 768, 768]
lowerCAmelCase__ : Dict = 150
lowerCAmelCase__ : Union[str, Any] = 16
lowerCAmelCase__ : str = (1, 384, 384)
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : List[str] = """project"""
if "ade" in checkpoint_url:
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Any = 768
lowerCAmelCase__ : Any = [1, 1, 1, 0.5]
lowerCAmelCase__ : Any = 150
lowerCAmelCase__ : Dict = 16
lowerCAmelCase__ : Any = """huggingface/label-files"""
lowerCAmelCase__ : Tuple = """ade20k-id2label.json"""
lowerCAmelCase__ : Tuple = json.load(open(cached_download(hf_hub_url(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
lowerCAmelCase__ : int = {int(UpperCamelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : List[str] = idalabel
lowerCAmelCase__ : int = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Union[str, Any] = [1, 150, 480, 480]
return config, expected_shape
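# The config builder above dispatches on substrings of the checkpoint URL. A tiny,
# self-contained illustration of that dispatch (names and ordering are illustrative;
# note that the `"nyu" or "midas"` condition above is always truthy as written):
def classify_checkpoint(checkpoint_url: str) -> str:
    if "large" in checkpoint_url:
        return "large"
    if "ade" in checkpoint_url:
        return "ade20k-segmentation"
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        return "hybrid-depth"
    return "unknown"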
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase , UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCAmelCase__ : str = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCAmelCase__ : List[str] = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCAmelCase__ : str = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCAmelCase__ : str = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCAmelCase__ : str = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCAmelCase__ : List[str] = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCAmelCase__ : Tuple = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCAmelCase__ : int = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase__ : Dict = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCAmelCase__ : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCAmelCase__ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCAmelCase__ : Optional[int] = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCAmelCase__ : int = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCAmelCase__ : int = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCAmelCase__ : int = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCAmelCase__ : Optional[int] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
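        # e.g. "refinenet4" becomes "fusion_stage.layers.0" and "refinenet1" becomes "fusion_stage.layers.3"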
lowerCAmelCase__ : Optional[Any] = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
lowerCAmelCase__ : str = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCAmelCase__ : Dict = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCAmelCase__ : List[str] = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCAmelCase__ : Optional[int] = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCAmelCase__ : str = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCAmelCase__ : int = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCAmelCase__ : Any = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCAmelCase__ : Optional[int] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCAmelCase__ : Tuple = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCAmelCase__ : Dict = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCAmelCase__ : Union[str, Any] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCAmelCase__ : Tuple = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCAmelCase__ : List[str] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCAmelCase__ : int = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCAmelCase__ : str = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCAmelCase__ : Optional[Any] = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCAmelCase__ : List[str] = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCAmelCase__ : str = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCAmelCase__ : str = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCAmelCase__ : str = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCAmelCase__ : Dict = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCAmelCase__ : Dict = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCAmelCase__ : Any = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCAmelCase__ : int = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCAmelCase__ : int = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCAmelCase__ : int = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCAmelCase__ : str = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase__ : Optional[Any] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
lowerCAmelCase__ : Union[str, Any] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
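        # timm stores q, k and v stacked row-wise in one (3 * hidden_size, hidden_size)
        # matrix, so each block below slices out hidden_size rows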
lowerCAmelCase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowerCAmelCase__ : Dict = in_proj_bias[: config.hidden_size]
lowerCAmelCase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase__ : List[str] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase__ : str = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase__ : Any = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase__ : Any = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
    lowerCAmelCase__ , lowerCAmelCase__ = get_dpt_config(UpperCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCAmelCase__ : Dict = torch.load(UpperCamelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(UpperCamelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCAmelCase__ : List[str] = state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = val
# read in qkv matrices
read_in_q_k_v(UpperCamelCase , UpperCamelCase )
# load HuggingFace model
lowerCAmelCase__ : List[Any] = DPTForSemanticSegmentation(UpperCamelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
# Check outputs on an image
lowerCAmelCase__ : Union[str, Any] = 480 if """ade""" in checkpoint_url else 384
lowerCAmelCase__ : Tuple = DPTImageProcessor(size=UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = prepare_img()
lowerCAmelCase__ : Optional[int] = image_processor(UpperCamelCase , return_tensors="""pt""" )
# forward pass
lowerCAmelCase__ : Any = model(**UpperCamelCase ).logits if """ade""" in checkpoint_url else model(**UpperCamelCase ).predicted_depth
if show_prediction:
lowerCAmelCase__ : Dict = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=UpperCamelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCamelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
_lowerCAmelCase = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 160 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_lowerCAmelCase = {'''tokenization_herbert''': ['''HerbertTokenizer''']}
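# the fast tokenizer depends on the optional `tokenizers` backend, so it is only
# added to the import structure when that backend is available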
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ['''HerbertTokenizerFast''']
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 160 | 1 |
def solution( a_ = 1_0_0_0 ) -> int:
    """Return the sum of all natural numbers below ``a_`` that are multiples of 3 or 5 (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < a_:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
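# Sanity check: the multiples of 3 or 5 below 10 are 3, 5, 6 and 9, so solution(10) == 23.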
if __name__ == "__main__":
print(f'''{solution() = }''')
| 55 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase =get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE =50_003
PYTHON_CODE =50_002
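# fairseq ids of the __en_XX__ and __python__ language codes in the base PLBart
# vocab (matching the fairseq_tokens_to_ids checks further down)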
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase = PLBartTokenizer
_lowerCamelCase = None
_lowerCamelCase = False
def UpperCamelCase__ ( self ) -> Tuple:
super().setUp()
# We have a SentencePiece fixture for testing
A = PLBartTokenizer(lowerCamelCase_ ,language_codes="""base""" ,keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ) -> int:
A = PLBartTokenizer(lowerCamelCase_ ,language_codes="""base""" ,keep_accents=lowerCamelCase_ )
A = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
A = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
A = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] ,)
A = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
A = tokenizer.vocab_size
A = [tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) for x in range(end - 4 ,lowerCamelCase_ )]
self.assertListEqual(lowerCamelCase_ ,["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
A = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A = tokenizer(lowerCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ ) ,lowerCamelCase_ ,)
def UpperCamelCase__ ( self ) -> Optional[Any]:
A = PLBartTokenizer(lowerCamelCase_ ,language_codes="""multi""" ,keep_accents=lowerCamelCase_ )
A = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase_ ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) ,[value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] ,)
A = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
A = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] ,)
A = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
A = tokenizer.vocab_size
A = [tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) for x in range(end - 7 ,lowerCamelCase_ )]
self.assertListEqual(
lowerCamelCase_ ,["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
A = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
A = tokenizer(lowerCamelCase_ ).input_ids
self.assertEqual(
tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ ,clean_up_tokenization_spaces=lowerCamelCase_ ) ,lowerCamelCase_ ,)
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = '''uclanlp/plbart-python-en_XX'''
    src_text = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
    tgt_text = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
    expected_src_tokens = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def UpperCamelCase__ ( cls ) -> List[str]:
A = PLBartTokenizer.from_pretrained(
cls.checkpoint_name ,language_codes="""base""" ,src_lang="""python""" ,tgt_lang="""en_XX""" )
A = 1
return cls
def UpperCamelCase__ ( self ) -> Optional[int]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] ,5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] ,5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] ,5_0_0_0_3 )
def UpperCamelCase__ ( self ) -> Optional[int]:
A = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> str:
self.assertIn(lowerCamelCase_ ,self.tokenizer.all_special_ids )
A = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
A = self.tokenizer.decode(lowerCamelCase_ ,skip_special_tokens=lowerCamelCase_ )
A = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ ,lowerCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token ,lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> List[str]:
A = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 2_0]
self.assertIsInstance(src_text[0] ,lowerCamelCase_ )
A = 1_0
A = self.tokenizer(lowerCamelCase_ ,max_length=lowerCamelCase_ ,truncation=lowerCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] ,2 )
self.assertEqual(ids[-1] ,lowerCamelCase_ )
self.assertEqual(len(lowerCamelCase_ ) ,lowerCamelCase_ )
def UpperCamelCase__ ( self ) -> Any:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) ,[5_0_0_0_4, 5_0_0_0_1] )
def UpperCamelCase__ ( self ) -> Optional[int]:
A = tempfile.mkdtemp()
A = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase_ )
A = PLBartTokenizer.from_pretrained(lowerCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowerCamelCase_ )
@require_torch
def UpperCamelCase__ ( self ) -> Optional[int]:
A = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase_ ,return_tensors="""pt""" )
A = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] ,lowerCamelCase_ )
self.assertEqual(batch.decoder_input_ids[1][-1] ,2 )
self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] )
@require_torch
def UpperCamelCase__ ( self ) -> str:
A = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
A = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCamelCase_ ,lowerCamelCase_ )
self.assertEqual((2, 2_6) ,batch.input_ids.shape )
self.assertEqual((2, 2_6) ,batch.attention_mask.shape )
A = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase_ )
self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] )
def UpperCamelCase__ ( self ) -> Tuple:
A = self.tokenizer(self.src_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=3 ,return_tensors="""pt""" )
A = self.tokenizer(
text_target=self.tgt_text ,padding=lowerCamelCase_ ,truncation=lowerCamelCase_ ,max_length=1_0 ,return_tensors="""pt""" )
A = targets["""input_ids"""]
A = shift_tokens_right(lowerCamelCase_ ,self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,1_0 )
@require_torch
def UpperCamelCase__ ( self ) -> List[Any]:
A = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""java""" )
self.assertEqual(
nested_simplify(lowerCamelCase_ ) ,{
# A, test, EOS, en_XX
"""input_ids""": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"""attention_mask""": [[1, 1, 1, 1]],
# java
"""forced_bos_token_id""": 5_0_0_0_1,
} ,)
| 617 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def _lowerCAmelCase( __A ):
UpperCAmelCase = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
UpperCAmelCase = 1024
UpperCAmelCase = 4096
UpperCAmelCase = 24
UpperCAmelCase = 16
UpperCAmelCase = [5, 11, 17, 23]
UpperCAmelCase = [256, 512, 1024, 1024]
UpperCAmelCase = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
UpperCAmelCase = 768
UpperCAmelCase = [1, 1, 1, 0.5]
UpperCAmelCase = [256, 512, 768, 768]
UpperCAmelCase = 150
UpperCAmelCase = 16
UpperCAmelCase = (1, 384, 384)
UpperCAmelCase = False
UpperCAmelCase = "project"
if "ade" in checkpoint_url:
UpperCAmelCase = True
UpperCAmelCase = 768
UpperCAmelCase = [1, 1, 1, 0.5]
UpperCAmelCase = 150
UpperCAmelCase = 16
UpperCAmelCase = "huggingface/label-files"
UpperCAmelCase = "ade20k-id2label.json"
UpperCAmelCase = json.load(open(cached_download(hf_hub_url(__A , __A , repo_type="dataset" ) ) , "r" ) )
        UpperCAmelCase = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase = idalabel
UpperCAmelCase = {v: k for k, v in idalabel.items()}
UpperCAmelCase = [1, 150, 480, 480]
return config, expected_shape
def _lowerCAmelCase( __A ):
UpperCAmelCase = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(__A , __A )
def _lowerCAmelCase( __A ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
UpperCAmelCase = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
UpperCAmelCase = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
UpperCAmelCase = name.replace("patch_embed" , "" )
if "pos_embed" in name:
UpperCAmelCase = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
UpperCAmelCase = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
UpperCAmelCase = name.replace("proj" , "projection" )
if "blocks" in name:
UpperCAmelCase = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
UpperCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
UpperCAmelCase = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
UpperCAmelCase = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
UpperCAmelCase = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
UpperCAmelCase = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
UpperCAmelCase = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
UpperCAmelCase = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
UpperCAmelCase = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
UpperCAmelCase = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
UpperCAmelCase = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
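        # e.g. refinenet4 maps to fusion_stage.layers.0 and refinenet1 to fusion_stage.layers.3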
UpperCAmelCase = name.replace(F"refinenet{layer_idx}" , F"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
UpperCAmelCase = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
UpperCAmelCase = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
UpperCAmelCase = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
UpperCAmelCase = name.replace("conv1" , "convolution1" )
if "conv2" in name:
UpperCAmelCase = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
UpperCAmelCase = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
UpperCAmelCase = name.replace("pretrained" , "dpt" )
if "bn" in name:
UpperCAmelCase = name.replace("bn" , "batch_norm" )
if "head" in name:
UpperCAmelCase = name.replace("head" , "head.head" )
if "encoder.norm" in name:
UpperCAmelCase = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
UpperCAmelCase = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
UpperCAmelCase = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
UpperCAmelCase = name.replace(".." , "." )
if "stem.conv" in name:
UpperCAmelCase = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
UpperCAmelCase = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
UpperCAmelCase = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
UpperCAmelCase = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
UpperCAmelCase = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
UpperCAmelCase = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
UpperCAmelCase = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def _lowerCAmelCase( __A , __A ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.weight" )
UpperCAmelCase = state_dict.pop(F"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
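        # the stacked qkv matrix holds query, key and value as consecutive hidden_size-row blocks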
UpperCAmelCase = in_proj_weight[: config.hidden_size, :]
UpperCAmelCase = in_proj_bias[: config.hidden_size]
UpperCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase = in_proj_bias[-config.hidden_size :]
def _lowerCAmelCase( ):
UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def _lowerCAmelCase( __A , __A , __A , __A , __A ):
UpperCAmelCase , UpperCAmelCase = get_dpt_config(__A )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
UpperCAmelCase = torch.load(__A , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__A )
# rename keys
for key in state_dict.copy().keys():
UpperCAmelCase = state_dict.pop(__A )
UpperCAmelCase = val
# read in qkv matrices
read_in_q_k_v(__A , __A )
# load HuggingFace model
UpperCAmelCase = DPTForSemanticSegmentation(__A ) if "ade" in checkpoint_url else DPTForDepthEstimation(__A )
model.load_state_dict(__A )
model.eval()
# Check outputs on an image
UpperCAmelCase = 480 if "ade" in checkpoint_url else 384
UpperCAmelCase = DPTImageProcessor(size=__A )
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(__A , return_tensors="pt" )
# forward pass
UpperCAmelCase = model(**__A ).logits if "ade" in checkpoint_url else model(**__A ).predicted_depth
if show_prediction:
UpperCAmelCase = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=__A , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__A )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(__A )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
parser.add_argument(
"--show_prediction",
action="store_true",
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 700 |
import copy
import os
import cv2 as cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch :
def __init__( self : Optional[int] ) -> Optional[Any]:
UpperCAmelCase = ""
UpperCAmelCase = ""
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = 2_5_6
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
UpperCAmelCase = 0
    def stretch( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
        self.img = cva.imread(lowerCAmelCase__ , 0 )
        self.original_image = copy.deepcopy(self.img )
        x , _ , _ = plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] , label="x" )
        self.k = np.sum(x )
        for i in range(len(x ) ):
            prk = x[i] / self.k
self.sk += prk
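            # classic histogram-equalization mapping: s_k = (L - 1) * sum of p(r_j) for j <= k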
            last = (self.L - 1) * self.sk
            self.rem = last % 1  # fractional part, used to round `last` to the nearest integer
            last = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(last )
        self.number_of_rows = int(np.ma.count(self.img ) / self.img[1].size )
        self.number_of_cols = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
                num = self.img[j][i]
if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
cva.imwrite("output_data/output.jpg" , self.img )
    def plot_histogram( self : str ) -> int:
plt.hist(self.img.ravel() , 2_5_6 , [0, 2_5_6] )
    def show_image( self : Dict ) -> Optional[Any]:
cva.imshow("Output-Image" , self.img )
cva.imshow("Input-Image" , self.original_image )
cva.waitKey(5_0_0_0 )
cva.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 1 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
snake_case : int = logging.get_logger(__name__)
class __lowercase ( BaseImageProcessor ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = ["pixel_values"]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = 1 / 255 , A_ = True , A_ = None , A_ = None , A_ = True , **A_ , )-> None:
super().__init__(**A_ )
_SCREAMING_SNAKE_CASE = size if size is not None else {'height': 384, 'width': 384}
_SCREAMING_SNAKE_CASE = get_size_dict(A_ , default_to_square=A_ )
_SCREAMING_SNAKE_CASE = do_resize
_SCREAMING_SNAKE_CASE = size
_SCREAMING_SNAKE_CASE = resample
_SCREAMING_SNAKE_CASE = do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else OPENAI_CLIP_STD
_SCREAMING_SNAKE_CASE = do_convert_rgb
def __magic_name__ ( self , A_ , A_ , A_ = PILImageResampling.BICUBIC , A_ = None , **A_ , )-> np.ndarray:
_SCREAMING_SNAKE_CASE = get_size_dict(A_ , default_to_square=A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
_SCREAMING_SNAKE_CASE = (size['height'], size['width'])
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def __magic_name__ ( self , A_ , A_ , A_ = None , **A_ , )-> List[str]:
return rescale(A_ , scale=A_ , data_format=A_ , **A_ )
def __magic_name__ ( self , A_ , A_ , A_ , A_ = None , **A_ , )-> np.ndarray:
return normalize(A_ , mean=A_ , std=A_ , data_format=A_ , **A_ )
def __magic_name__ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , )-> PIL.Image.Image:
_SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
_SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
_SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
_SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
_SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
_SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
_SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
_SCREAMING_SNAKE_CASE = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_SCREAMING_SNAKE_CASE = size if size is not None else self.size
_SCREAMING_SNAKE_CASE = get_size_dict(A_ , default_to_square=A_ )
_SCREAMING_SNAKE_CASE = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_SCREAMING_SNAKE_CASE = [convert_to_rgb(A_ ) for image in images]
# All transformations expect numpy arrays.
_SCREAMING_SNAKE_CASE = [to_numpy_array(A_ ) for image in images]
if do_resize:
_SCREAMING_SNAKE_CASE = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_rescale:
_SCREAMING_SNAKE_CASE = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
_SCREAMING_SNAKE_CASE = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
_SCREAMING_SNAKE_CASE = [to_channel_dimension_format(A_ , A_ ) for image in images]
_SCREAMING_SNAKE_CASE = BatchFeature(data={'pixel_values': images} , tensor_type=A_ )
return encoded_outputs
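# A minimal usage sketch (hypothetical `image` variable, assuming an RGB PIL image):
#     processor = __lowercase()                              # 384x384 bicubic resize by default
#     batch = processor(images=image, return_tensors="pt")
#     batch["pixel_values"].shape                            # (1, 3, 384, 384)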
| 605 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = MODEL_FOR_MASKED_LM_MAPPING
SCREAMING_SNAKE_CASE : Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING
def __magic_name__ ( self )-> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __magic_name__ ( self )-> List[str]:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-0_5, 'token': 38015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-0_5, 'token': 25506, 'token_str': ' accuser'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-0_5,
'token': 38015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-0_5,
'token': 25506,
'token_str': ' accuser',
},
] , )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-0_5, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-0_5, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def __magic_name__ ( self )-> Tuple:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-0_5, 'token': 35676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-0_5,
'token': 35676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-0_5, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-0_5, 'token': 2941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-0_5, 'token': 13606, 'token_str': ' Clara'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=6 ) , [
[
{
'score': 2.2e-0_5,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-0_5,
'token': 35676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-0_5, 'token': 16416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def __magic_name__ ( self )-> int:
_SCREAMING_SNAKE_CASE = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
_SCREAMING_SNAKE_CASE = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(A_ , A_ )
@slow
@require_torch
def __magic_name__ ( self )-> Optional[int]:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
self.run_large_test(A_ )
@slow
@require_tf
def __magic_name__ ( self )-> str:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
self.run_large_test(A_ )
def __magic_name__ ( self , A_ )-> List[str]:
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(A_ ) , [
{'sequence': 'My name is John', 'score': 0.008, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.007, 'token': 1573, 'token_str': ' Chris'},
] , )
_SCREAMING_SNAKE_CASE = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(A_ ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.251,
'token': 2201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.214,
'token': 12790,
'token_str': ' Lyon',
},
] , )
_SCREAMING_SNAKE_CASE = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(A_ ) , [
{'sequence': 'My name is Patrick', 'score': 0.005, 'token': 3499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.000, 'token': 13606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.000, 'token': 2941, 'token_str': ' Te'},
] , )
@require_torch
def __magic_name__ ( self )-> Dict:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
self.run_pipeline_test(A_ , [] )
@require_tf
def __magic_name__ ( self )-> Any:
_SCREAMING_SNAKE_CASE = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
self.run_pipeline_test(A_ , [] )
def __magic_name__ ( self , A_ , A_ , A_ )-> Optional[int]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = [
F'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def __magic_name__ ( self , A_ , A_ )-> Optional[int]:
_SCREAMING_SNAKE_CASE = fill_masker.tokenizer
_SCREAMING_SNAKE_CASE = fill_masker.model
_SCREAMING_SNAKE_CASE = fill_masker(
F'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = fill_masker([F'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
A_ , [
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
] , )
with self.assertRaises(A_ ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(A_ ):
fill_masker('This is' )
self.run_test_top_k(A_ , A_ )
self.run_test_targets(A_ , A_ )
self.run_test_top_k_targets(A_ , A_ )
self.fill_mask_with_duplicate_targets_and_top_k(A_ , A_ )
self.fill_mask_with_multiple_masks(A_ , A_ )
def __magic_name__ ( self , A_ , A_ )-> List[Any]:
_SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
_SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:2]
# Pipeline argument
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ , targets=A_ )
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , A_ )
_SCREAMING_SNAKE_CASE = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(A_ ) )
# Call argument
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=A_ )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = {vocab[el] for el in targets}
self.assertEqual({el['token'] for el in outputs} , A_ )
_SCREAMING_SNAKE_CASE = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el['token_str'] for el in outputs} , set(A_ ) )
# Score equivalence
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=A_ )
_SCREAMING_SNAKE_CASE = [top_mask['token_str'] for top_mask in outputs]
_SCREAMING_SNAKE_CASE = [top_mask['score'] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(A_ ) == set(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=A_ )
_SCREAMING_SNAKE_CASE = [top_mask['score'] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
# Raises with invalid
with self.assertRaises(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''] )
with self.assertRaises(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='' )
def __magic_name__ ( self , A_ , A_ )-> List[str]:
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ , top_k=2 )
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
A_ , [
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
] , )
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
def __magic_name__ ( self , A_ , A_ )-> Dict:
_SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
# top_k=2, ntargets=3
_SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:3]
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=A_ )
# If we use the most probably targets, and filter differently, we should still
# have the same results
        _SCREAMING_SNAKE_CASE = [el['token_str'] for el in sorted(A_ , key=lambda x : x["score"] , reverse=A_ )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(A_ ).issubset(A_ ):
_SCREAMING_SNAKE_CASE = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=A_ )
# They should yield exactly the same result
self.assertEqual(nested_simplify(A_ ) , nested_simplify(A_ ) )
def __magic_name__ ( self , A_ , A_ )-> Dict:
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = tokenizer.get_vocab()
# String duplicates + id duplicates
_SCREAMING_SNAKE_CASE = sorted(vocab.keys() )[:3]
_SCREAMING_SNAKE_CASE = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_SCREAMING_SNAKE_CASE = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=A_ , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(A_ ) , 3 )
def __magic_name__ ( self , A_ , A_ )-> int:
_SCREAMING_SNAKE_CASE = FillMaskPipeline(model=A_ , tokenizer=A_ )
_SCREAMING_SNAKE_CASE = fill_masker(
F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
A_ , [
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
[
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
{'sequence': ANY(A_ ), 'score': ANY(A_ ), 'token': ANY(A_ ), 'token_str': ANY(A_ )},
],
] , )
| 605 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _snake_case :
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple=13 ,SCREAMING_SNAKE_CASE__ : Optional[int]=7 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : Any=99 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=32 ,SCREAMING_SNAKE_CASE__ : List[str]=5 ,SCREAMING_SNAKE_CASE__ : Tuple=4 ,SCREAMING_SNAKE_CASE__ : Tuple=0.1 ,SCREAMING_SNAKE_CASE__ : str=0.1 ,SCREAMING_SNAKE_CASE__ : Dict=512 ,SCREAMING_SNAKE_CASE__ : Optional[int]=12 ,SCREAMING_SNAKE_CASE__ : List[Any]=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,SCREAMING_SNAKE_CASE__ : Dict="last" ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,):
SCREAMING_SNAKE_CASE:Union[str, Any] = parent
SCREAMING_SNAKE_CASE:Dict = batch_size
SCREAMING_SNAKE_CASE:int = seq_length
SCREAMING_SNAKE_CASE:Any = is_training
SCREAMING_SNAKE_CASE:Union[str, Any] = use_input_lengths
SCREAMING_SNAKE_CASE:List[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE:Dict = use_labels
SCREAMING_SNAKE_CASE:Dict = gelu_activation
SCREAMING_SNAKE_CASE:Dict = sinusoidal_embeddings
SCREAMING_SNAKE_CASE:List[str] = causal
SCREAMING_SNAKE_CASE:List[Any] = asm
SCREAMING_SNAKE_CASE:List[str] = n_langs
SCREAMING_SNAKE_CASE:Optional[Any] = vocab_size
SCREAMING_SNAKE_CASE:Optional[int] = n_special
SCREAMING_SNAKE_CASE:str = hidden_size
SCREAMING_SNAKE_CASE:Dict = num_hidden_layers
SCREAMING_SNAKE_CASE:Dict = num_attention_heads
SCREAMING_SNAKE_CASE:Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE:Tuple = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE:Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE:List[str] = type_vocab_size
SCREAMING_SNAKE_CASE:int = type_sequence_label_size
SCREAMING_SNAKE_CASE:Any = initializer_range
SCREAMING_SNAKE_CASE:str = num_labels
SCREAMING_SNAKE_CASE:Dict = num_choices
SCREAMING_SNAKE_CASE:Any = summary_type
SCREAMING_SNAKE_CASE:int = use_proj
SCREAMING_SNAKE_CASE:int = scope
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
SCREAMING_SNAKE_CASE:str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE:Tuple = None
if self.use_input_lengths:
SCREAMING_SNAKE_CASE:List[Any] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
SCREAMING_SNAKE_CASE:Any = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE:Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
SCREAMING_SNAKE_CASE:Dict = None
SCREAMING_SNAKE_CASE:List[Any] = None
SCREAMING_SNAKE_CASE:List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE:Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
SCREAMING_SNAKE_CASE:str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
SCREAMING_SNAKE_CASE:int = ids_tensor([self.batch_size] ,2 ).float()
SCREAMING_SNAKE_CASE:Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices )
SCREAMING_SNAKE_CASE:List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __UpperCamelCase ( self : Optional[int] ):
return FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,)
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
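# For context: `ids_tensor` and `random_attention_mask` used by the tester above come from
# the shared test utilities imported at the top of this file. A minimal sketch of what they
# are assumed to do -- not the exact transformers implementation:
#
#     import torch
#
#     def ids_tensor(shape, vocab_size):
#         # random integer ids in [0, vocab_size)
#         return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)
#
#     def random_attention_mask(shape):
#         mask = ids_tensor(shape, vocab_size=2)
#         mask[:, 0] = 1  # guarantee at least one attended token per row
#         return mask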
| 465 |
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
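# For reference, `compute_perplexity` lives in igf.igf; a rough sketch of the behaviour
# this script relies on (an assumption, not the real helper):
#
#     import torch
#
#     def compute_perplexity_sketch(model, dataset, context_len):
#         model.eval()
#         total_loss, n = 0.0, 0
#         with torch.no_grad():
#             for batch in dataset:
#                 context = batch[0, 0, :context_len].unsqueeze(0)
#                 loss = model(context, labels=context)[0]
#                 total_loss, n = total_loss + loss.item(), n + 1
#         return torch.exp(torch.tensor(total_loss / max(n, 1)))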
def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()

    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)

    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]

            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1

                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)

                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=False,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=False,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=False,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
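# Example invocation (the script and data file names are assumed; the .jbl data files
# must be generated beforehand):
#
#     python run_clm_igf.py --context_len=32 --max_steps=1000 --batch_size=16 \
#         --data_file=data/tokenized_stories_train_wikitext103.jbl \
#         --igf_data_file=igf_context_pairs.jbl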
| 465 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
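# Minimal usage sketch (assumes the `transformers` package, where this configuration and
# the matching LevitModel class live):
#
#     from transformers import LevitConfig, LevitModel
#
#     config = LevitConfig(image_size=224, hidden_sizes=[128, 256, 384])
#     model = LevitModel(config)  # randomly initialised weights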
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 21 |
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
| 21 | 1 |
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # fundamental transformation applied to every channel value
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
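# A quick self-check of the point transform (a sketch; builds a tiny grayscale image):
#
#     from PIL import Image
#
#     gray = Image.new("L", (2, 2), color=100)
#     brighter = change_brightness(gray, 50)
#     assert brighter.getpixel((0, 0)) == 150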
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png') | 707 |
class MaxFenwickTree:
    """Fenwick tree supporting point updates and range-maximum queries (right bound exclusive)."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute the stored maximum over [current_left_border, index)
                # together with the new value (O(log^2 n) update overall)
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
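# Usage sketch (0-indexed, query right bound exclusive):
#
#     tree = MaxFenwickTree(5)
#     tree.update(2, 10)
#     tree.update(4, 7)
#     assert tree.query(0, 5) == 10
#     assert tree.query(3, 5) == 7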
if __name__ == "__main__":
import doctest
doctest.testmod() | 193 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that mimics @property but caches output in a member variable."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
def strtobool(val):
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
def is_tensor(x):
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    return False if not is_flax_available() else _is_jax(x)
def to_py_obj(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
class ModelOutput(OrderedDict):
    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        return tuple(self[k] for k in self.keys())
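# ModelOutput subclasses behave like both tuples and dicts. A small sketch (the toy
# dataclass below is illustrative only):
#
#     from dataclasses import dataclass
#     from typing import Optional
#
#     @dataclass
#     class ToyOutput(ModelOutput):
#         loss: Optional[float] = None
#         logits: Optional[list] = None
#
#     out = ToyOutput(logits=[1, 2, 3])
#     assert out["logits"] == out.logits == out[0]  # unset (None) fields are skipped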
class ExplicitEnum(str, Enum):
    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
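# flatten_dict in action (a sketch):
#
#     nested = {"a": {"b": 1, "c": {"d": 2}}}
#     assert flatten_dict(nested) == {"a.b": 1, "a.c.d": 2}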
def transpose(array, axes=None):
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
def add_model_info_to_auto_map(auto_map, repo_id):
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.") | 494 | '''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
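# Instantiating the configuration (a sketch; relies on PretrainedConfig's attribute_map
# behaviour for the hidden_size alias):
#
#     config = BlenderbotSmallConfig(encoder_layers=2, decoder_layers=2, d_model=64)
#     assert config.hidden_size == 64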
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            ) | 494 | 1 |
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def snake_case__ ( __lowercase = "laptop" ) -> Union[str, Any]:
"""simple docstring"""
A__ : Dict = F'https://www.amazon.in/laptop/s?k={product}'
A__ : Tuple = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
"Accept-Language": "en-US, en;q=0.5",
}
A__ : int = BeautifulSoup(requests.get(_lowerCAmelCase , headers=_lowerCAmelCase ).text )
# Initialize a Pandas dataframe with the column titles
A__ : int = DataFrame(
columns=[
"Product Title",
"Product Link",
"Current Price of the product",
"Product Rating",
"MRP of the product",
"Discount",
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"div" , attrs={"class": "s-result-item", "data-component-type": "s-search-result"} , ) , soup.find_all("div" , attrs={"class": "a-row a-size-base a-color-base"} ) , ):
try:
A__ : Dict = item.ha.text
A__ : Optional[int] = "https://www.amazon.in/" + item.ha.a["href"]
A__ : Optional[int] = item.find("span" , attrs={"class": "a-offscreen"} ).text
try:
A__ : Union[str, Any] = item.find("span" , attrs={"class": "a-icon-alt"} ).text
except AttributeError:
A__ : Optional[Any] = "Not available"
try:
A__ : Dict = (
"₹"
+ item.find(
"span" , attrs={"class": "a-price a-text-price"} ).text.split("₹" )[1]
)
except AttributeError:
A__ : Union[str, Any] = ""
try:
A__ : Union[str, Any] = float(
(
(
float(product_mrp.strip("₹" ).replace("," , "" ) )
- float(product_price.strip("₹" ).replace("," , "" ) )
)
/ float(product_mrp.strip("₹" ).replace("," , "" ) )
)
* 1_0_0 )
except ValueError:
A__ : List[str] = float("nan" )
except AttributeError:
pass
A__ : Dict = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
A__ : List[str] = " "
A__ : Tuple = " "
data_frame.index += 1
return data_frame
if __name__ == "__main__":
snake_case : str = 'headphones'
get_amazon_product_data(product).to_csv(f"""Amazon Product Data for {product}.csv""") | 711 |
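# The scraper above fetches one Amazon search page and flattens each result
# card into a DataFrame row. A compact, runnable sketch of the request/parse
# core, assuming requests and beautifulsoup4 are installed (the URL and CSS
# classes mirror the code above and may break if the live site changes):
import requests
from bs4 import BeautifulSoup

def fetch_titles_sketch(product: str = "laptop") -> list[str]:
    url = f"https://www.amazon.in/laptop/s?k={product}"
    headers = {"User-Agent": "Mozilla/5.0", "Accept-Language": "en-US, en;q=0.5"}
    soup = BeautifulSoup(requests.get(url, headers=headers).text, "html.parser")
    cards = soup.find_all(
        "div", attrs={"class": "s-result-item", "data-component-type": "s-search-result"}
    )
    # Each result card exposes its title through an <h2> heading.
    return [card.h2.text.strip() for card in cards if card.h2 is not None]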
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def snake_case__ ( __lowercase ) -> bool:
"""simple docstring"""
A__ : int = int(number**0.5 )
return number == sq * sq
def snake_case__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> tuple[int, int]:
"""simple docstring"""
A__ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
A__ : int = x_den * y_den * z_den
A__ : int = gcd(__lowercase , __lowercase )
top //= hcf
bottom //= hcf
return top, bottom
def snake_case__ ( __lowercase = 3_5 ) -> int:
"""simple docstring"""
A__ : set = set()
A__ : int
A__ : Fraction = Fraction(0 )
A__ : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
A__ : Any = x_num * y_den + x_den * y_num
A__ : List[Any] = x_den * y_den
A__ : List[Any] = gcd(__lowercase , __lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A__ : List[Any] = add_three(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
unique_s.add(__lowercase )
# n=2
A__ : Any = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
A__ : Optional[int] = x_den * x_den * y_den * y_den
if is_sq(__lowercase ) and is_sq(__lowercase ):
A__ : Union[str, Any] = int(sqrt(__lowercase ) )
A__ : int = int(sqrt(__lowercase ) )
A__ : Any = gcd(__lowercase , __lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A__ : List[Any] = add_three(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
unique_s.add(__lowercase )
# n=-1
A__ : Tuple = x_num * y_num
A__ : int = x_den * y_num + x_num * y_den
A__ : List[str] = gcd(__lowercase , __lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A__ : str = add_three(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
unique_s.add(__lowercase )
# n=2
A__ : Any = x_num * x_num * y_num * y_num
A__ : List[str] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(__lowercase ) and is_sq(__lowercase ):
A__ : Optional[int] = int(sqrt(__lowercase ) )
A__ : List[Any] = int(sqrt(__lowercase ) )
A__ : Union[str, Any] = gcd(__lowercase , __lowercase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
A__ : Optional[Any] = add_three(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
unique_s.add(__lowercase )
for num, den in unique_s:
total += Fraction(__lowercase , __lowercase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""") | 182 | 0 |
def lowerCamelCase_ ( __UpperCamelCase ):
A_ = [1]
A_ , A_ , A_ = 0, 0, 0
A_ = ugly_nums[ia] * 2
A_ = ugly_nums[ia] * 3
A_ = ugly_nums[ia] * 5
for _ in range(1 , __UpperCamelCase ):
A_ = min(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
ugly_nums.append(__UpperCamelCase )
if next_num == next_a:
ia += 1
A_ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
A_ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
A_ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''') | 141 |
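# ugly_numbers above merges the streams 2*u, 3*u, 5*u with three pointers.
# A heap-based alternative that yields the same sequence (a sketch, not a
# drop-in replacement for the function above):
import heapq

def nth_ugly_number(n: int) -> int:
    heap, seen = [1], {1}
    value = 1
    for _ in range(n):
        value = heapq.heappop(heap)
        for factor in (2, 3, 5):
            candidate = value * factor
            if candidate not in seen:
                seen.add(candidate)
                heapq.heappush(heap, candidate)
    return value

assert nth_ugly_number(10) == 12  # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12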
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
class __lowercase ( A ):
__magic_name__ : Any = '''sequence-classification'''
def __init__( self , a__ ) -> Optional[Any]:
'''simple docstring'''
if type(a__ ) == dict:
A_ = Namespace(**a__ )
A_ = glue_output_modes[hparams.task]
A_ = glue_tasks_num_labels[hparams.task]
super().__init__(a__ , a__ , self.mode )
def lowerCAmelCase_ ( self , **a__ ) -> List[Any]:
'''simple docstring'''
return self.model(**a__ )
def lowerCAmelCase_ ( self , a__ , a__ ) -> Optional[int]:
'''simple docstring'''
A_ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A_ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
A_ = self(**a__ )
A_ = outputs[0]
A_ = self.trainer.lr_schedulers[0]['''scheduler''']
A_ = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = self.hparams
A_ = processors[args.task]()
A_ = processor.get_labels()
for mode in ["train", "dev"]:
A_ = self._feature_file(a__ )
if os.path.exists(a__ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , a__ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
A_ = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
A_ = convert_examples_to_features(
a__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , a__ )
torch.save(a__ , a__ )
def lowerCAmelCase_ ( self , a__ , a__ , a__ = False ) -> DataLoader:
'''simple docstring'''
A_ = '''dev''' if mode == '''test''' else mode
A_ = self._feature_file(a__ )
logger.info('''Loading features from cached file %s''' , a__ )
A_ = torch.load(a__ )
A_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
A_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
A_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
A_ = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
A_ = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a__ , a__ , a__ , a__ ) , batch_size=a__ , shuffle=a__ , )
def lowerCAmelCase_ ( self , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A_ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
A_ = self(**a__ )
A_ , A_ = outputs[:2]
A_ = logits.detach().cpu().numpy()
A_ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase_ ( self , a__ ) -> tuple:
'''simple docstring'''
A_ = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
A_ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
A_ = np.argmax(a__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
A_ = np.squeeze(a__ )
A_ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
A_ = [[] for _ in range(out_label_ids.shape[0] )]
A_ = [[] for _ in range(out_label_ids.shape[0] )]
A_ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , a__ , a__ )}
A_ = dict(results.items() )
A_ = results
return ret, preds_list, out_label_list
def lowerCAmelCase_ ( self , a__ ) -> dict:
'''simple docstring'''
A_ , A_ , A_ = self._eval_end(a__ )
A_ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase_ ( self , a__ ) -> dict:
'''simple docstring'''
A_ , A_ , A_ = self._eval_end(a__ )
A_ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase_ ( a__ , a__ ) -> Dict:
'''simple docstring'''
BaseTransformer.add_model_specific_args(a__ , a__ )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=a__ , required=a__ , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=a__ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def lowerCamelCase_ ( ):
A_ = argparse.ArgumentParser()
add_generic_args(__UpperCamelCase , os.getcwd() )
A_ = GLUETransformer.add_model_specific_args(__UpperCamelCase , os.getcwd() )
A_ = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
A_ = os.path.join(
'''./results''' , F"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
A_ = GLUETransformer(__UpperCamelCase )
A_ = generic_train(__UpperCamelCase , __UpperCamelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
A_ = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=__UpperCamelCase ) )
A_ = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__UpperCamelCase )
if __name__ == "__main__":
main() | 141 | 1 |
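# The Lightning module above caches converted GLUE features to disk and
# reloads them in the dataloader hook. The caching idiom in isolation (a
# sketch; the path and build function are placeholders, not the script's API):
import os
import torch

def load_or_build_features(cache_path: str, build_fn, overwrite: bool = False):
    if os.path.exists(cache_path) and not overwrite:
        return torch.load(cache_path)  # reuse previously computed features
    features = build_fn()              # expensive tokenization/conversion
    torch.save(features, cache_path)
    return features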
import os
def __lowerCAmelCase ( ) -> Tuple:
__lowerCamelCase: Union[str, Any] = os.path.dirname(os.path.realpath(snake_case ) )
__lowerCamelCase: List[str] = os.path.join(snake_case , """triangle.txt""" )
with open(snake_case ) as f:
__lowerCamelCase: Any = f.readlines()
__lowerCamelCase: Union[str, Any] = []
for line in triangle:
__lowerCamelCase: List[Any] = []
for number in line.strip().split(""" """ ):
numbers_from_line.append(int(snake_case ) )
a.append(snake_case )
for i in range(1 , len(snake_case ) ):
for j in range(len(a[i] ) ):
__lowerCamelCase: int = a[i - 1][j] if j != len(a[i - 1] ) else 0
__lowerCamelCase: Any = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(snake_case , snake_case )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
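# solution() above runs a top-down DP over Project Euler's triangle file:
# each cell accumulates the better of its two parents, and the answer is the
# maximum of the last row. The same recurrence on an inline triangle (a
# self-contained sketch):
def max_path_sum(triangle: list[list[int]]) -> int:
    rows = [row[:] for row in triangle]
    for i in range(1, len(rows)):
        for j in range(len(rows[i])):
            left = rows[i - 1][j - 1] if j > 0 else 0
            right = rows[i - 1][j] if j < len(rows[i - 1]) else 0
            rows[i][j] += max(left, right)
    return max(rows[-1])

assert max_path_sum([[3], [7, 4], [2, 4, 6], [8, 5, 9, 3]]) == 23  # 3+7+4+9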
| 712 |
class a :
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : str = "" , SCREAMING_SNAKE_CASE_ : bool = False ):
# Mapping from the first character of the prefix of the node
__lowerCamelCase: dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
__lowerCamelCase: str = is_leaf
__lowerCamelCase: Optional[int] = prefix
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: Optional[Any] = 0
for q, w in zip(self.prefix , SCREAMING_SNAKE_CASE_ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : list[str] ):
for word in words:
self.insert(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ):
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
__lowerCamelCase: Union[str, Any] = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__lowerCamelCase: Any = RadixNode(prefix=SCREAMING_SNAKE_CASE_ , is_leaf=SCREAMING_SNAKE_CASE_ )
else:
__lowerCamelCase: Union[str, Any] = self.nodes[word[0]]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase: List[str] = incoming_node.match(
SCREAMING_SNAKE_CASE_ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE_ )
            # Case 4: The word is longer than or equal to the matching prefix
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__lowerCamelCase: List[Any] = remaining_prefix
__lowerCamelCase: Optional[Any] = self.nodes[matching_string[0]]
__lowerCamelCase: Optional[int] = RadixNode(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Union[str, Any] = aux_node
if remaining_word == "":
__lowerCamelCase: Optional[int] = True
else:
self.nodes[matching_string[0]].insert(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: int = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE_ )
if not incoming_node:
return False
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Any = incoming_node.match(
SCREAMING_SNAKE_CASE_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: str = self.nodes.get(word[0] , SCREAMING_SNAKE_CASE_ )
if not incoming_node:
return False
else:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Dict = incoming_node.match(
SCREAMING_SNAKE_CASE_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(SCREAMING_SNAKE_CASE_ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__lowerCamelCase: List[Any] = list(self.nodes.values() )[0]
__lowerCamelCase: Any = merging_node.is_leaf
self.prefix += merging_node.prefix
__lowerCamelCase: Tuple = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__lowerCamelCase: int = False
# If there is 1 edge, we merge it with its child
else:
__lowerCamelCase: Union[str, Any] = list(incoming_node.nodes.values() )[0]
__lowerCamelCase: List[str] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__lowerCamelCase: Union[str, Any] = merging_node.nodes
return True
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int = 0 ):
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def __lowerCAmelCase ( ) -> bool:
__lowerCamelCase: Optional[int] = """banana bananas bandana band apple all beast""".split()
__lowerCamelCase: Optional[Any] = RadixNode()
root.insert_many(snake_case )
assert all(root.find(snake_case ) for word in words )
assert not root.find("""bandanas""" )
assert not root.find("""apps""" )
root.delete("""all""" )
assert not root.find("""all""" )
root.delete("""banana""" )
assert not root.find("""banana""" )
assert root.find("""bananas""" )
return True
def __lowerCAmelCase ( ) -> None:
assert test_trie()
def __lowerCAmelCase ( ) -> None:
__lowerCamelCase: int = RadixNode()
__lowerCamelCase: str = """banana bananas bandanas bandana band apple all beast""".split()
root.insert_many(snake_case )
print("""Words:""" , snake_case )
print("""Tree:""" )
root.print_tree()
if __name__ == "__main__":
main()
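# The radix tree above compresses shared prefixes into single edges, so
# lookups touch at most one child per leading character. A tiny usage sketch
# on top of the class defined above (method names are taken from the demo
# calls at the end of the file, since the class definitions are obfuscated):
def radix_demo() -> None:
    root = RadixNode()
    root.insert_many(["test", "tester", "team"])
    assert root.find("tester") and not root.find("tes")
    root.delete("tester")
    assert root.find("test") and not root.find("tester")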
| 189 | 0 |
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCamelCase (nn.Module ):
_SCREAMING_SNAKE_CASE : Any = 42
_SCREAMING_SNAKE_CASE : List[Any] = jnp.floataa
def __snake_case ( self :Tuple ) ->str:
lowercase : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self :Any , __magic_name__ :Any ) ->Any:
lowercase , lowercase , lowercase , lowercase : str = hidden_states.shape
lowercase : int = jax.image.resize(
__A , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
lowercase : List[str] = self.conv(__A )
return hidden_states
class UpperCamelCase (nn.Module ):
_SCREAMING_SNAKE_CASE : Optional[Any] = 42
_SCREAMING_SNAKE_CASE : int = jnp.floataa
def __snake_case ( self :Union[str, Any] ) ->Dict:
lowercase : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self :Any , __magic_name__ :Union[str, Any] ) ->Tuple:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
lowercase : int = self.conv(__A )
return hidden_states
class UpperCamelCase (nn.Module ):
_SCREAMING_SNAKE_CASE : Tuple = 42
_SCREAMING_SNAKE_CASE : List[str] = None
_SCREAMING_SNAKE_CASE : Dict = 0.0
_SCREAMING_SNAKE_CASE : Any = None
_SCREAMING_SNAKE_CASE : int = jnp.floataa
def __snake_case ( self :Optional[Any] ) ->Dict:
lowercase : Union[str, Any] = self.in_channels if self.out_channels is None else self.out_channels
lowercase : str = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowercase : Tuple = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase : Optional[int] = nn.Dense(__A , dtype=self.dtype )
lowercase : List[str] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowercase : List[str] = nn.Dropout(self.dropout_prob )
lowercase : Union[str, Any] = nn.Conv(
__A , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowercase : Optional[Any] = None
if use_nin_shortcut:
lowercase : List[Any] = nn.Conv(
__A , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self :Optional[int] , __magic_name__ :Tuple , __magic_name__ :Dict , __magic_name__ :List[Any]=True ) ->Optional[Any]:
lowercase : Optional[Any] = hidden_states
lowercase : Union[str, Any] = self.norma(__A )
lowercase : str = nn.swish(__A )
lowercase : int = self.conva(__A )
lowercase : int = self.time_emb_proj(nn.swish(__A ) )
lowercase : Union[str, Any] = jnp.expand_dims(jnp.expand_dims(__A , 1 ) , 1 )
lowercase : str = hidden_states + temb
lowercase : int = self.norma(__A )
lowercase : Any = nn.swish(__A )
lowercase : List[Any] = self.dropout(__A , __A )
lowercase : Dict = self.conva(__A )
if self.conv_shortcut is not None:
lowercase : Union[str, Any] = self.conv_shortcut(__A )
return hidden_states + residual
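# The first block above doubles spatial resolution with a nearest-neighbor
# resize followed by a 3x3 conv; the second halves it with a strided conv.
# The resize step in isolation (a sketch using jax.image directly on NHWC
# arrays, matching the layout used above):
import jax
import jax.numpy as jnp

def upsample_nearest_2x(x: jnp.ndarray) -> jnp.ndarray:
    batch, height, width, channels = x.shape
    return jax.image.resize(x, (batch, height * 2, width * 2, channels), method="nearest")

# upsample_nearest_2x(jnp.zeros((1, 8, 8, 4))).shape == (1, 16, 16, 4)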
| 264 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__A = logging.get_logger('transformers.models.encodec')
__A = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
__A = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
__A = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
__A = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
__A = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
__A = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__A = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__A = []
__A = []
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
for attribute in key.split('''.''' ):
_A = getattr(_lowercase , _lowercase )
if weight_type is not None:
_A = getattr(_lowercase , _lowercase ).shape
else:
_A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
_A = value
elif weight_type == "weight_g":
_A = value
elif weight_type == "weight_v":
_A = value
elif weight_type == "bias":
_A = value
elif weight_type == "running_mean":
_A = value
elif weight_type == "running_var":
_A = value
elif weight_type == "num_batches_tracked":
_A = value
elif weight_type == "weight_ih_l0":
_A = value
elif weight_type == "weight_hh_l0":
_A = value
elif weight_type == "bias_ih_l0":
_A = value
elif weight_type == "bias_hh_l0":
_A = value
elif weight_type == "weight_ih_l1":
_A = value
elif weight_type == "weight_hh_l1":
_A = value
elif weight_type == "bias_ih_l1":
_A = value
elif weight_type == "bias_hh_l1":
_A = value
else:
_A = value
logger.info(f"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
_A ,_A = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = []
if model_name == "encodec_24khz" or "encodec_32khz":
_A = MAPPING_24K
elif model_name == "encodec_48khz":
_A = MAPPING_48K
else:
raise ValueError(f"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(_lowercase , _lowercase ):
logger.info(f"""{name} was ignored""" )
continue
_A = False
for key, mapped_key in MAPPING.items():
if "*" in key:
_A ,_A = key.split('''.*.''' )
if prefix in name and suffix in name:
_A = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
_A = True
if "*" in mapped_key:
_A = name.split(_lowercase )[0].split('''.''' )[-2]
_A = mapped_key.replace('''*''' , _lowercase )
if "weight_g" in name:
_A = '''weight_g'''
elif "weight_v" in name:
_A = '''weight_v'''
elif "weight_ih_l0" in name:
_A = '''weight_ih_l0'''
elif "weight_hh_l0" in name:
_A = '''weight_hh_l0'''
elif "bias_ih_l0" in name:
_A = '''bias_ih_l0'''
elif "bias_hh_l0" in name:
_A = '''bias_hh_l0'''
elif "weight_ih_l1" in name:
_A = '''weight_ih_l1'''
elif "weight_hh_l1" in name:
_A = '''weight_hh_l1'''
elif "bias_ih_l1" in name:
_A = '''bias_ih_l1'''
elif "bias_hh_l1" in name:
_A = '''bias_hh_l1'''
elif "bias" in name:
_A = '''bias'''
elif "weight" in name:
_A = '''weight'''
elif "running_mean" in name:
_A = '''running_mean'''
elif "running_var" in name:
_A = '''running_var'''
elif "num_batches_tracked" in name:
_A = '''num_batches_tracked'''
else:
_A = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(f"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def __A ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None , ):
'''simple docstring'''
if config_path is not None:
_A = EncodecConfig.from_pretrained(_lowercase )
else:
_A = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
_A = [8, 5, 4, 4]
_A = [2.2]
_A = 64
_A = 3_20_00
_A = 20_48
_A = False
_A = False
_A = False
elif model_name == "encodec_48khz":
_A = [8, 5, 4, 2]
_A = [3.0, 6.0, 12.0, 24.0]
_A = 4_80_00
_A = 2
_A = False
_A = '''time_group_norm'''
_A = True
_A = 1.0
_A = 0.01
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
_A = EncodecModel(_lowercase )
_A = EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(_lowercase )
_A = torch.load(_lowercase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
_A = original_checkpoint['''best_state''']
recursively_load_weights(_lowercase , _lowercase , _lowercase )
model.save_pretrained(_lowercase )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(_lowercase )
model.push_to_hub(_lowercase )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__A = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
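# set_recursively above writes each converted tensor onto the HF module by
# walking a dotted attribute path and checking shapes first. The walking
# idiom in isolation (a simplified sketch; the real function also dispatches
# on weight_type suffixes such as weight_g and bias_hh_l0):
def get_by_dotted_path(root, dotted_key: str):
    obj = root
    for attr in dotted_key.split("."):
        obj = getattr(obj, attr)
    return obj

# e.g. get_by_dotted_path(model, "encoder.layers.0.conv.weight")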
| 484 | 0 |
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class lowerCamelCase_ ( unittest.TestCase ):
_lowerCAmelCase : List[str] = JukeboxTokenizer
_lowerCAmelCase : str = {
'artist': 'Zac Brown Band',
'genres': 'Country',
'lyrics': 'I met a traveller from an antique land,\n Who said "Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ',
}
@require_torch
def __lowercase ( self : Optional[int] ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE : Dict = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''' )
SCREAMING_SNAKE_CASE : int = tokenizer(**self.metas )["input_ids"]
# fmt: off
SCREAMING_SNAKE_CASE : List[str] = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
def __lowercase ( self : List[Any] ):
"""simple docstring"""
import torch
SCREAMING_SNAKE_CASE : Tuple = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''' )
SCREAMING_SNAKE_CASE : Tuple = tokenizer(**self.metas )["input_ids"]
# fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 713 |
'''simple docstring'''
import string
from math import logaa
def UpperCAmelCase ( A : str , A : str ):
SCREAMING_SNAKE_CASE : Optional[Any] = document.translate(
str.maketrans('''''' , '''''' , string.punctuation ) ).replace('''\n''' , '''''' )
SCREAMING_SNAKE_CASE : Tuple = document_without_punctuation.split(''' ''' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def UpperCAmelCase ( A : str , A : str ):
SCREAMING_SNAKE_CASE : int = corpus.lower().translate(
str.maketrans('''''' , '''''' , string.punctuation ) ) # strip all punctuation and replace it with ''
SCREAMING_SNAKE_CASE : Any = corpus_without_punctuation.split('''\n''' )
SCREAMING_SNAKE_CASE : Optional[Any] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(A ))
def UpperCAmelCase ( A : int , A : int , A : str=False ):
if smoothing:
if n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('''df must be > 0''' )
elif n == 0:
raise ValueError('''log10(0) is undefined.''' )
return round(logaa(n / df ) , 3 )
def UpperCAmelCase ( A : int , A : int ):
return round(tf * idf , 3 )
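# The three functions above combine into the usual tf-idf score. A worked
# example using their conventions (log base 10, rounding to 3 places):
from math import log10

def tf_idf_example() -> float:
    tf = 3                                # term appears 3 times in the document
    n_docs, df = 10, 2                    # corpus of 10 docs, term occurs in 2
    idf = round(log10(n_docs / df), 3)    # log10(5) ~= 0.699
    return round(tf * idf, 3)             # 3 * 0.699 = 2.097

assert tf_idf_example() == 2.097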
| 464 | 0 |
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
if density <= 0:
raise ValueError('''Impossible fluid density''' )
if bulk_modulus <= 0:
raise ValueError('''Impossible bulk modulus''' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
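# The function above is the Newton-Laplace relation c = sqrt(K / rho). A
# quick numerical check with approximate values for water (bulk modulus
# ~2.15 GPa, density ~998 kg/m^3), which should land near the accepted
# ~1480 m/s:
def newton_laplace_check() -> float:
    density, bulk_modulus = 998.0, 2.15e9
    speed = (bulk_modulus / density) ** 0.5
    assert 1400 < speed < 1550  # ~1467.8 m/s with these inputs
    return speed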
| 471 |
import random
def lowerCamelCase_ ( UpperCamelCase_ ):
_a : str = num - 1
_a : int = 0
while s % 2 == 0:
_a : Optional[int] = s // 2
t += 1
for _ in range(5 ):
_a : int = random.randrange(2 , num - 1 )
_a : Tuple = pow(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if v != 1:
_a : str = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_a : str = i + 1
_a : str = (v**2) % num
return True
def lowerCamelCase_ ( UpperCamelCase_ ):
if num < 2:
return False
_a : Optional[int] = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(UpperCamelCase_ )
def lowerCamelCase_ ( UpperCamelCase_ = 1024 ):
while True:
_a : Optional[int] = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(UpperCamelCase_ ):
return num
if __name__ == "__main__":
__UpperCAmelCase : Union[str, Any] = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
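# rabin_miller above performs 5 rounds of the Miller-Rabin witness test after
# the trial-division shortcut over the low-prime table. One witness round in
# isolation (a sketch of the same logic, for odd num > 3):
import random

def miller_rabin_round(num: int) -> bool:
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    a = random.randrange(2, num - 1)
    v = pow(a, s, num)
    if v in (1, num - 1):
        return True                 # a is not a witness for compositeness
    for _ in range(t - 1):
        v = (v * v) % num
        if v == num - 1:
            return True
    return False                    # a proves num composite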
| 471 | 1 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __UpperCamelCase ( a : List[Any] ) ->List[str]:
snake_case = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(a , a )
def __UpperCamelCase ( a : int ) ->Union[str, Any]:
snake_case , snake_case = emb.weight.shape
snake_case = nn.Linear(a , a , bias=a )
snake_case = emb.weight.data
return lin_layer
def __UpperCamelCase ( a : List[str] ) ->List[str]:
snake_case = torch.load(a , map_location='''cpu''' )
snake_case = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
snake_case = mam_aaa['''model''']
remove_ignore_keys_(a )
snake_case = state_dict['''encoder.embed_tokens.weight'''].shape[0]
snake_case = MaMaaaConfig(
vocab_size=a , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
snake_case = state_dict['''decoder.embed_tokens.weight''']
snake_case = MaMaaaForConditionalGeneration(a )
model.model.load_state_dict(a , strict=a )
snake_case = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_lowercase = parser.parse_args()
_lowercase = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
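# The emb-to-linear helper above turns the shared embedding matrix into a
# bias-free output projection. A sketch of the standard weight-tying form
# (the conversion script assigns .data instead; this variant shares the
# Parameter directly):
import torch
from torch import nn

def tie_output_projection(emb: nn.Embedding) -> nn.Linear:
    vocab_size, hidden_size = emb.weight.shape
    proj = nn.Linear(hidden_size, vocab_size, bias=False)
    proj.weight = emb.weight  # share storage: logits = hidden @ emb.weight.T
    return proj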
| 44 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _lowercase ( yaml.SafeLoader ):
def UpperCamelCase ( self , A__ ) -> List[str]:
snake_case = [self.constructed_objects[key_node] for key_node, _ in node.value]
snake_case = [tuple(A__ ) if isinstance(A__ , A__ ) else key for key in keys]
snake_case = Counter(A__ )
snake_case = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def UpperCamelCase ( self , A__ , A__=False ) -> List[Any]:
snake_case = super().construct_mapping(A__ , deep=A__ )
self._check_no_duplicates_on_constructed_node(A__ )
return mapping
def __UpperCamelCase ( a : str ) ->Tuple[Optional[str], str]:
snake_case = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
snake_case = full_content[1:].index('''---''' ) + 1
snake_case = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(a )
class _lowercase ( __a ):
# class attributes
_UpperCAmelCase = {'''train_eval_index'''} # train-eval-index in the YAML metadata
@classmethod
def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata":
with open(A__ , encoding='''utf-8''' ) as readme_file:
snake_case , snake_case = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(A__ )
else:
return cls()
def UpperCamelCase ( self , A__ ) -> str:
if path.exists():
with open(A__ , encoding='''utf-8''' ) as readme_file:
snake_case = readme_file.read()
else:
snake_case = None
snake_case = self._to_readme(A__ )
with open(A__ , '''w''' , encoding='''utf-8''' ) as readme_file:
readme_file.write(A__ )
def UpperCamelCase ( self , A__ = None ) -> str:
if readme_content is not None:
snake_case , snake_case = _split_yaml_from_readme(A__ )
snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
snake_case = '''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata":
snake_case = yaml.load(A__ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
snake_case = {
(key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**A__ )
def UpperCamelCase ( self ) -> str:
return yaml.safe_dump(
{
(key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=A__ , allow_unicode=A__ , encoding='''utf-8''' , ).decode('''utf-8''' )
_lowercase = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_lowercase = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
_lowercase = ap.parse_args()
_lowercase = Path(args.readme_filepath)
_lowercase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
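# _split_yaml_from_readme above separates a leading YAML block delimited by
# '---' lines from the README body. The split in isolation (a sketch with
# the same semantics):
def split_front_matter(text: str):
    lines = text.splitlines()
    if lines and lines[0] == "---" and "---" in lines[1:]:
        end = lines[1:].index("---") + 1
        return "\n".join(lines[1:end]), "\n".join(lines[end + 1 :])
    return None, text

# split_front_matter("---\na: 1\n---\nbody") == ("a: 1", "body")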
| 44 | 1 |
"""simple docstring"""
def _a ( _snake_case ):
"""simple docstring"""
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
_UpperCamelCase = int(input("""Enter number: """).strip())
print(F"""{number} is {"" if perfect(number) else "not "}a Perfect Number.""")
| 341 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCamelCase__ :
def __init__( self ,A = None ):
if components is None:
UpperCAmelCase = []
UpperCAmelCase = list(A )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(A ,self.__components ) ) + ")"
def __add__( self ,A ):
UpperCAmelCase = len(self )
if size == len(A ):
UpperCAmelCase = [self.__components[i] + other.component(A ) for i in range(A )]
return Vector(A )
else:
raise Exception("""must have the same size""" )
def __sub__( self ,A ):
UpperCAmelCase = len(self )
if size == len(A ):
UpperCAmelCase = [self.__components[i] - other.component(A ) for i in range(A )]
return Vector(A )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self ,A ):
...
@overload
def __mul__( self ,A ):
...
def __mul__( self ,A ):
if isinstance(A ,(float, int) ):
UpperCAmelCase = [c * other for c in self.__components]
return Vector(A )
elif isinstance(A ,A ) and len(self ) == len(A ):
UpperCAmelCase = len(self )
UpperCAmelCase = [self.__components[i] * other.component(A ) for i in range(A )]
return sum(A )
else: # error case
raise Exception("""invalid operand!""" )
def _UpperCamelCase ( self ):
return Vector(self.__components )
def _UpperCamelCase ( self ,A ):
if isinstance(A ,A ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def _UpperCamelCase ( self ,A ,A ):
assert -len(self.__components ) <= pos < len(self.__components )
UpperCAmelCase = value
def _UpperCamelCase ( self ):
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
UpperCAmelCase = [c**2 for c in self.__components]
return math.sqrt(sum(A ) )
def _UpperCamelCase ( self ,A ,A = False ):
UpperCAmelCase = self * other
UpperCAmelCase = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _a ( _snake_case ):
"""simple docstring"""
assert isinstance(_snake_case , _snake_case )
return Vector([0] * dimension )
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
assert isinstance(_snake_case , _snake_case ) and (isinstance(_snake_case , _snake_case ))
UpperCAmelCase = [0] * dimension
UpperCAmelCase = 1
return Vector(_snake_case )
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
assert (
isinstance(_snake_case , _snake_case )
and isinstance(_snake_case , _snake_case )
and (isinstance(_snake_case , (int, float) ))
)
return x * scalar + y
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
random.seed(_snake_case )
UpperCAmelCase = [random.randint(_snake_case , _snake_case ) for _ in range(_snake_case )]
return Vector(_snake_case )
class lowerCamelCase__ :
def __init__( self ,A ,A ,A ):
UpperCAmelCase = matrix
UpperCAmelCase = w
UpperCAmelCase = h
def __str__( self ):
UpperCAmelCase = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self ,A ):
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase = []
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] + other.component(A ,A )
for j in range(self.__width )
]
matrix.append(A )
return Matrix(A ,self.__width ,self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self ,A ):
if self.__width == other.width() and self.__height == other.height():
UpperCAmelCase = []
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] - other.component(A ,A )
for j in range(self.__width )
]
matrix.append(A )
return Matrix(A ,self.__width ,self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self ,A ):
...
@overload
def __mul__( self ,A ):
...
def __mul__( self ,A ):
if isinstance(A ,A ): # matrix-vector
if len(A ) == self.__width:
UpperCAmelCase = zero_vector(self.__height )
for i in range(self.__height ):
UpperCAmelCase = [
self.__matrix[i][j] * other.component(A )
for j in range(self.__width )
]
ans.change_component(A ,sum(A ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(A ,(int, float) ): # matrix-scalar
UpperCAmelCase = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A ,self.__width ,self.__height )
return None
def _UpperCamelCase ( self ):
return self.__height
def _UpperCamelCase ( self ):
return self.__width
def _UpperCamelCase ( self ,A ,A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def _UpperCamelCase ( self ,A ,A ,A ):
if 0 <= x < self.__height and 0 <= y < self.__width:
UpperCAmelCase = value
else:
raise Exception("""change_component: indices out of bounds""" )
def _UpperCamelCase ( self ,A ,A ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
UpperCAmelCase = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A ) ):
UpperCAmelCase = minor[i][:y] + minor[i][y + 1 :]
return Matrix(A ,self.__width - 1 ,self.__height - 1 ).determinant()
def _UpperCamelCase ( self ,A ,A ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A ,A )
else:
raise Exception("""Indices out of bounds""" )
def _UpperCamelCase ( self ):
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
UpperCAmelCase = [
self.__matrix[0][y] * self.cofactor(0 ,A ) for y in range(self.__width )
]
return sum(A )
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = [[0] * n for _ in range(_snake_case )]
return Matrix(_snake_case , _snake_case , _snake_case )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
random.seed(_snake_case )
UpperCAmelCase = [
[random.randint(_snake_case , _snake_case ) for _ in range(_snake_case )] for _ in range(_snake_case )
]
return Matrix(_snake_case , _snake_case , _snake_case )
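# The Vector/Matrix classes above support +, -, scalar and dot products,
# matrix-vector application, and cofactor-expansion determinants. A small
# usage sketch against that API (method names follow the internal calls such
# as euclidean_length, component and determinant, since the definitions are
# obfuscated):
def linear_algebra_demo() -> None:
    v = Vector([1, 2, 2])
    assert v.euclidean_length() == 3.0  # sqrt(1 + 4 + 4)
    m = Matrix([[2, 0], [0, 3]], 2, 2)
    assert m.determinant() == 6
    assert (m * Vector([1, 1])).component(1) == 3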
| 341 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_a : int = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 707 |
import argparse
import copy
def generate_neighbours(path ):
    '''Build an adjacency dict from a whitespace-separated edge-list file.'''
    dict_of_neighbours = {}
    with open(path ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]] )
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]] )
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
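# The parser above expects a whitespace-separated edge list, one edge per line:
#     "<node_a> <node_b> <distance>"
# A minimal example file (illustrative values):
#     a b 20
#     a c 18
#     b c 10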
def generate_first_solution(path , dict_of_neighbours ):
    '''Build a greedy nearest-neighbour tour starting from the file's first node.'''
    with open(path ) as f:
        start_node = f.read(1 )
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1] ) < int(minim ) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting )
        distance_of_first_solution = distance_of_first_solution + int(minim )
        visiting = best_node
    first_solution.append(end_node )
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution , dict_of_neighbours ):
    '''Generate neighbour tours by swapping every pair of interior nodes.'''
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n )
        for kn in solution[1:-1]:
            idx2 = solution.index(kn )
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution )
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1] )
            _tmp.append(distance )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def tabu_search(first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    '''Iteratively improve the tour while keeping a tabu list of recent swaps.'''
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None ):
    '''Parse the graph file, build a greedy first tour, then run tabu search.'''
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
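    # Example invocation (illustrative file name and parameter values):
    #     python tabu_search.py -f tsp_data.txt -i 100 -s 5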
| 571 | 0 |
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    """Min-priority queue that also supports updating an item's priority."""
    def __init__( self ) -> None:
        self.elements = []
        self.set = set()
    def minkey( self ) -> float:
        """Return the smallest priority in the queue, or inf if it is empty."""
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''' )
    def empty( self ) -> bool:
        """Return True if the queue holds no elements."""
        return len(self.elements ) == 0
    def put( self , item , priority ) -> None:
        """Insert ``item`` with ``priority``, or update its priority in place."""
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
else:
# update
# print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
temp.append((priority, item) )
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx) )
    def remove_element( self , item ) -> None:
        """Remove ``item`` from the queue if it is present."""
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy) )
    def top_show( self ):
        """Return the item with the smallest priority without removing it."""
        return self.elements[0][1]
    def get( self ):
        """Pop and return ``(priority, item)`` for the smallest priority."""
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
return (priority, item)
def consistent_heuristic(p , goal ):
    # euclidean distance
    a = np.array(p )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_1(p , goal ):
    # integer division by time variable
    return consistent_heuristic(p , goal ) // t
def heuristic_2(p , goal ):
    # manhattan distance
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
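# Illustrative comparison of the three heuristics above (not part of the
# original file); heuristic_1 depends on the module-level counter t defined
# further down.
def _demo_heuristics() -> None:
    print(consistent_heuristic((0, 0) , (19, 19) ))  # euclidean, ~26.87
    print(heuristic_1((0, 0) , (19, 19) ))           # euclidean // t
    print(heuristic_2((0, 0) , (19, 19) ))           # manhattan, 38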
def key(start , i , goal , g_function ):
    ans = g_function[start] + Wa * heuristics[i](start , goal )
    return ans
def do_something(back_pointer , goal , start ):
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = '''*'''
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '''#'''
    grid[0][(n - 1)] = '''-'''
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '''-'''
        x = back_pointer[x]
    grid[(n - 1)][0] = '''-'''
    for i in range(n ):
        for j in range(n ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
    x = back_pointer[goal]
    while x != start:
        print(x , end=''' ''' )
        x = back_pointer[x]
    print(x )
    sys.exit()
def valid(p ):
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def expand_state(s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('''inf''' )
            if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                if neighbours not in close_list_inad:
                    for var in range(1 , n_heuristic ):
                        if key(neighbours , var , goal , g_function ) <= Wa * key(
                            neighbours , 0 , goal , g_function ):
                            open_list[j].put(
                                neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1
n = 20
n_heuristic = 3 # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star(start , goal , n_heuristic ):
    g_function = {start: 0, goal: float('''inf''' )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float('''inf''' ):
        for i in range(1 , n_heuristic ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_anchor.append(get_s )
print('''No path found to goal''' )
print()
for i in range(n - 1 , -1 , -1 ):
        for j in range(n ):
if (j, i) in blocks:
print('''#''' , end=''' ''' )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print('''*''' , end=''' ''' )
else:
print('''-''' , end=''' ''' )
else:
print('''*''' , end=''' ''' )
if (j, i) == (n - 1, n - 1):
print('''<-- End position''' , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 355 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class A( unittest.TestCase ):
"""simple docstring"""
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase :List[str] = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 1_28, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 1_42, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
_UpperCamelCase :Union[str, Any] = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 1_28,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 1_42,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
self.assertEqual(flatten_dict(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase( self ) -> Any:
"""simple docstring"""
_UpperCamelCase :Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , x.transpose() ) )
_UpperCamelCase :Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] = np.random.randn(3 , 4 )
_UpperCamelCase :Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
_UpperCamelCase :int = np.random.randn(3 , 4 , 5 )
_UpperCamelCase :Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def _UpperCamelCase( self ) -> Dict:
"""simple docstring"""
_UpperCamelCase :Dict = np.random.randn(3 , 4 )
_UpperCamelCase :List[str] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , transpose(SCREAMING_SNAKE_CASE__ ).numpy() ) )
_UpperCamelCase :int = np.random.randn(3 , 4 , 5 )
_UpperCamelCase :Dict = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def _UpperCamelCase( self ) -> str:
"""simple docstring"""
_UpperCamelCase :int = np.random.randn(3 , 4 )
_UpperCamelCase :Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ ) ) ) )
_UpperCamelCase :Dict = np.random.randn(3 , 4 , 5 )
_UpperCamelCase :Dict = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) , np.asarray(transpose(SCREAMING_SNAKE_CASE__ , axes=(1, 2, 0) ) ) ) )
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase :int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) )
_UpperCamelCase :Dict = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) )
@require_torch
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] = np.random.randn(3 , 4 )
_UpperCamelCase :Optional[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
_UpperCamelCase :str = np.random.randn(3 , 4 , 5 )
_UpperCamelCase :str = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_tf
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :Optional[int] = np.random.randn(3 , 4 )
_UpperCamelCase :Dict = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ).numpy() ) )
_UpperCamelCase :Union[str, Any] = np.random.randn(3 , 4 , 5 )
_UpperCamelCase :Optional[int] = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ).numpy() ) )
@require_flax
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase :List[str] = np.random.randn(3 , 4 )
_UpperCamelCase :str = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (4, 3) ) ) ) )
_UpperCamelCase :Dict = np.random.randn(3 , 4 , 5 )
_UpperCamelCase :Union[str, Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) , np.asarray(reshape(SCREAMING_SNAKE_CASE__ , (12, 5) ) ) ) )
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase :Dict = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.squeeze(SCREAMING_SNAKE_CASE__ ) ) )
_UpperCamelCase :List[Any] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) )
@require_torch
def _UpperCamelCase( self ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase :str = np.random.randn(1 , 3 , 4 )
_UpperCamelCase :Any = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
_UpperCamelCase :List[str] = np.random.randn(1 , 4 , 1 , 5 )
_UpperCamelCase :List[Any] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_tf
def _UpperCamelCase( self ) -> int:
"""simple docstring"""
_UpperCamelCase :str = np.random.randn(1 , 3 , 4 )
_UpperCamelCase :str = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , squeeze(SCREAMING_SNAKE_CASE__ ).numpy() ) )
_UpperCamelCase :str = np.random.randn(1 , 4 , 1 , 5 )
_UpperCamelCase :str = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ).numpy() ) )
@require_flax
def _UpperCamelCase( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase :Union[str, Any] = np.random.randn(1 , 3 , 4 )
_UpperCamelCase :Optional[Any] = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ ) ) ) )
_UpperCamelCase :Dict = np.random.randn(1 , 4 , 1 , 5 )
_UpperCamelCase :int = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) , np.asarray(squeeze(SCREAMING_SNAKE_CASE__ , axis=2 ) ) ) )
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :Dict = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) )
@require_torch
def _UpperCamelCase( self ) -> Tuple:
"""simple docstring"""
_UpperCamelCase :List[str] = np.random.randn(3 , 4 )
_UpperCamelCase :Union[str, Any] = torch.tensor(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_tf
def _UpperCamelCase( self ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase :str = np.random.randn(3 , 4 )
_UpperCamelCase :int = tf.constant(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ).numpy() ) )
@require_flax
def _UpperCamelCase( self ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase :Tuple = np.random.randn(3 , 4 )
_UpperCamelCase :str = jnp.array(SCREAMING_SNAKE_CASE__ )
self.assertTrue(np.allclose(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) , np.asarray(expand_dims(SCREAMING_SNAKE_CASE__ , axis=1 ) ) ) )
| 355 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class a_ ( unittest.TestCase ):
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
        self.block_size = 10
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = [1, 2, 3, 4]
UpperCamelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(_SCREAMING_SNAKE_CASE , self.block_size , 0 ) , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
UpperCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_SCREAMING_SNAKE_CASE , self.block_size , 0 ) , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
UpperCamelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(_SCREAMING_SNAKE_CASE , self.block_size , 0 ) , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase = """It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this."""
UpperCamelCase ,UpperCamelCase = process_story(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , [] )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = """"""
UpperCamelCase ,UpperCamelCase = process_story(_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , [] )
self.assertEqual(_SCREAMING_SNAKE_CASE , [] )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = (
"""It was the year of Our Lord one thousand seven hundred and """
"""seventy-five\n\nSpiritual revelations were conceded to England """
"""at that favoured period, as at this.\n@highlight\n\nIt was the best of times"""
)
UpperCamelCase ,UpperCamelCase = process_story(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [
"""It was the year of Our Lord one thousand seven hundred and seventy-five.""",
"""Spiritual revelations were conceded to England at that favoured period, as at this.""",
]
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = ["""It was the best of times."""]
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase = torch.tensor([1, 2, 3, 4] )
UpperCamelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(_SCREAMING_SNAKE_CASE , 0 ).numpy() , expected.numpy() )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
UpperCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_SCREAMING_SNAKE_CASE , 23 ).numpy() , expected.numpy() )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
UpperCamelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(_SCREAMING_SNAKE_CASE , 1 ).numpy() , expected.numpy() )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = 101
UpperCamelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
UpperCamelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
UpperCamelCase = compute_token_type_ids(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
np.testing.assert_array_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 35 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n )-> int:
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution(limit = 10000 )-> int:
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
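# Illustrative check (not part of the original solution): 220 and 284 form the
# classic amicable pair, so sum_of_divisors maps each onto the other and both
# numbers are counted by solution(10000).
def _demo_amicable_pair() -> None:
    assert sum_of_divisors(220 ) == 284
    assert sum_of_divisors(284 ) == 220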
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 35 | 1 |
"""simple docstring"""
def binary_recursive(decimal ) -> str:
    '''Recursively convert a non-negative integer to its binary digits.'''
    decimal = int(decimal )
    if decimal in (0, 1): # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal ,2 )
    return binary_recursive(div ) + str(mod )
def main(number ) -> str:
    '''Validate the input and return it as a prefixed binary string.'''
    number = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f"""{negative}0b{binary_recursive(int(number ) )}"""
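# Illustrative usage (not part of the original file):
def _demo_binary_conversion() -> None:
    assert main("7" ) == "0b111"
    assert main("-12" ) == "-0b1100"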
if __name__ == "__main__":
from doctest import testmod
    testmod()
| 505 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left , right , array , target ) -> int:
    '''Linear search fallback once the range is below ``precision``.'''
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array , target ) -> int:
    '''Iterative ternary search over a sorted list; returns the index or -1.'''
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left , right , array , target ) -> int:
    '''Recursive ternary search over a sorted list; returns the index or -1.'''
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
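# Illustrative, non-interactive usage (not part of the original script):
def _demo_ternary_search() -> None:
    data = [1, 3, 5, 7, 9, 11]
    assert ite_ternary_search(data , 7 ) == 3
    assert rec_ternary_search(0 , len(data ) - 1 , data , 7 ) == 3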
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"""Iterative search: {target} found at position: {result_ite}""")
        print(f"""Recursive search: {target} found at position: {result_rec}""")
    else:
        print("Not found")
| 505 | 1 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , num_labels=3 , scope=None , encoder_stride=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ):
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Dict = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : List[str] = False
    def setUp( self ):
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def A ( self : Optional[int] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def A ( self : int ) -> List[Any]:
pass
def A ( self : Any ) -> Tuple:
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def A ( self : Tuple ) -> Optional[int]:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(A )
lowercase_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Any = [*signature.parameters.keys()]
lowercase_ : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_image_modeling( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
def A ( self : int ) -> str:
if not self.model_tester.is_training:
return
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : str = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(A )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase_ : Any = model_class(A )
model.to(A )
model.train()
lowercase_ : Any = self._prepare_for_class(A , A , return_labels=A )
lowercase_ : Optional[Any] = model(**A ).loss
loss.backward()
def A ( self : Dict ) -> Union[str, Any]:
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase_ : Optional[Any] = model_class(A )
model.gradient_checkpointing_enable()
model.to(A )
model.train()
lowercase_ : str = self._prepare_for_class(A , A , return_labels=A )
lowercase_ : Optional[Any] = model(**A ).loss
loss.backward()
def A ( self : Tuple ) -> Any:
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(A ),
*get_values(A ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type['title']}''' ):
lowercase_ : int = problem_type['''title''']
lowercase_ : int = problem_type['''num_labels''']
lowercase_ : Dict = model_class(A )
model.to(A )
model.train()
lowercase_ : Tuple = self._prepare_for_class(A , A , return_labels=A )
if problem_type["num_labels"] > 1:
lowercase_ : Any = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
lowercase_ : Dict = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=A ) as warning_list:
lowercase_ : str = model(**A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def A ( self : Union[str, Any] ) -> Union[str, Any]:
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = DeiTModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
@cached_property
def A ( self : Optional[Any] ) -> Dict:
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def A ( self : Optional[Any] ) -> Dict:
lowercase_ : Dict = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
A )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Tuple = prepare_img()
lowercase_ : Dict = image_processor(images=A , return_tensors='''pt''' ).to(A )
# forward pass
with torch.no_grad():
lowercase_ : Any = model(**A )
# verify the logits
lowercase_ : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , A )
lowercase_ : Optional[Any] = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A ( self : Union[str, Any] ) -> Tuple:
lowercase_ : int = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
lowercase_ : Any = self.default_image_processor
lowercase_ : Union[str, Any] = prepare_img()
lowercase_ : List[Any] = image_processor(images=A , return_tensors='''pt''' )
lowercase_ : Any = inputs.pixel_values.to(A )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase_ : List[Any] = model(A )
| 716 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__A : List[Any] = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=30_522, type=int)
    args = parser.parse_args()
    logger.info(F"""Loading data from {args.data_file}""")
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
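    # Example invocation (illustrative; the paths match the argparse defaults above):
    #     python token_counts.py --data_file data/dump.bert-base-uncased.pickle \
    #         --token_counts_dump data/token_counts.bert-base-uncased.pickle --vocab_size 30522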
| 141 | 0 |
def permute(nums )-> list[list[int]]:
    """Return all permutations of ``nums`` by rotating out one element at a time."""
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permutea(nums )-> list[list[int]]:
    """Return all permutations of ``nums`` using in-place swaps (backtracking)."""
    def backtrack(start ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[start] , nums[i] = nums[i], nums[start]
                backtrack(start + 1 )
                nums[start] , nums[i] = nums[i], nums[start] # backtrack
    output = []
    backtrack(0 )
    return output
if __name__ == "__main__":
    import doctest
    # use res to print the data returned by permutea
    res = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
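    # Illustrative check (not part of the original file): both implementations
    # agree up to ordering, e.g.
    #     sorted(permute([1, 2, 3])) == sorted(permutea([1, 2, 3]))
    # and the expected permutations of [1, 2, 3] are:
    #     [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]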
| 491 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCAmelCase_ : Dict = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
lowerCAmelCase : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
lowerCAmelCase : str = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
lowerCAmelCase : int = field(
default=128 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCAmelCase : bool = field(
default=_UpperCAmelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    def __post_init__( self ):
        self.task_name = self.task_name.lower()
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : int = """train"""
lowerCAmelCase : Tuple = """dev"""
lowerCAmelCase : Optional[Any] = """test"""
class UpperCamelCase ( _UpperCAmelCase ):
lowerCAmelCase : GlueDataTrainingArguments
lowerCAmelCase : str
lowerCAmelCase : List[InputFeatures]
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , cache_dir = None , ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , FutureWarning , )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
        label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir )
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir )
                else:
                    examples = self.processor.get_train_examples(args.data_dir )
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
                start = time.time()
                torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ):
return len(self.features )
    def __getitem__( self , i ):
return self.features[i]
def __A ( self ):
return self.label_list
| 491 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( UpperCamelCase_ ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'OwlViTImageProcessor'
lowercase_ = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Wrap an OwlViT image processor and a CLIP tokenizer into one processor."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , query_images=None , images=None , padding="max_length" , return_tensors="np" , **kwargs ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
            else:
                raise ValueError('Target return tensor type could not be returned' )
            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask
if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding['query_pixel_values'] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process( self , *args , **kwargs ):
        """Forward to the image processor's ``post_process``."""
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection( self , *args , **kwargs ):
        """Forward to the image processor's ``post_process_object_detection``."""
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection( self , *args , **kwargs ):
        """Forward to the image processor's ``post_process_image_guided_detection``."""
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class( self ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
| 636 |
from math import factorial, radians
def maclaurin_sin(angle_in_degrees , accuracy = 18 , rounded_values_count = 10 ):
    '''Approximate the sine of an angle given in degrees with a Maclaurin series.'''
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b # One positive term and the next will be negative and so on...
        a += 2 # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("doctest").testmod()
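# Illustrative sanity check (not part of the original script): the truncated
# Maclaurin series should track math.sin closely; 1e-8 is an assumed tolerance.
def _demo_maclaurin_sin() -> None:
    from math import sin
    for angle in (0.0, 30.0, 90.0, 180.0):
        assert abs(maclaurin_sin(angle ) - sin(radians(angle ) ) ) < 1e-8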
| 636 | 1 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")
        self.assertIsInstance(act, nn.SiLU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")
        self.assertIsInstance(act, nn.Mish)
        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")
        self.assertIsInstance(act, nn.GELU)
        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
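# Note on the boundary values asserted above: in float32, exp(100) overflows to
# inf, so sigmoid(-100) evaluates to exactly 0.0 and silu(-100) = -100 * 0.0 = 0.0.
# For large positive inputs such as 20 these activations are numerically the identity.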
| 395 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
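# Minimal usage sketch (illustrative only; the save path below is hypothetical):
# config = LukeConfig(vocab_size=50267, entity_vocab_size=500000)
# config.save_pretrained("./my-luke-config")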
| 164 | 0 |
'''simple docstring'''
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place with pigeonhole sort and return it."""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
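# Complexity note: pigeonhole sort runs in O(n + range) time with O(range) extra
# space, where range = max(array) - min(array) + 1, so it only pays off when the
# span of values is small relative to the number of elements.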
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : int = input('Enter numbers separated by comma:\n')
_snake_case : Tuple = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 713 |
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
'7B': 11008,
'13B': 13824,
'30B': 17920,
'65B': 22016,
'70B': 28672,
}
NUM_SHARDS = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
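# Worked example of the sizing rule above: the 7B model has hidden size n = 4096,
# so int(8 * 4096 / 3) = 10922, and rounding up to the next multiple of 256 gives
# 11008, matching the 7B entry in INTERMEDIATE_SIZE_MAP at the top of the file.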
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
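    # Note: permute() converts the checkpoint's interleaved rotary-embedding
    # layout (adjacent even/odd channel pairs) into the half-split layout that
    # the Hugging Face LLaMA implementation expects.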
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
_a = torch.load(os.path.join(UpperCamelCase , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
_a = [
torch.load(os.path.join(UpperCamelCase , f'consolidated.{i:02d}.pth' ) , map_location='''cpu''' )
for i in range(UpperCamelCase )
]
_a = 0
_a = {'''weight_map''': {}}
for layer_i in range(UpperCamelCase ):
_a = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_a = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
_a = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
_a = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(UpperCamelCase , UpperCamelCase , UpperCamelCase )
for i in range(UpperCamelCase )
] , dim=0 , ).reshape(UpperCamelCase , UpperCamelCase ) )
_a = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
UpperCamelCase , UpperCamelCase , UpperCamelCase )
for i in range(UpperCamelCase )
] , dim=0 , ).reshape(UpperCamelCase , UpperCamelCase ) , UpperCamelCase , UpperCamelCase , UpperCamelCase , )
_a = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
UpperCamelCase , UpperCamelCase , UpperCamelCase )
for i in range(UpperCamelCase )
] , dim=0 , ).reshape(UpperCamelCase , UpperCamelCase )
_a = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(UpperCamelCase )] , dim=1 )
_a = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(UpperCamelCase )] , dim=0 )
_a = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(UpperCamelCase )] , dim=1 )
_a = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(UpperCamelCase )] , dim=0 )
_a = inv_freq
for k, v in state_dict.items():
_a = filename
param_count += v.numel()
torch.save(UpperCamelCase , os.path.join(UpperCamelCase , UpperCamelCase ) )
_a = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
_a = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
_a = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(UpperCamelCase )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(UpperCamelCase )] , dim=0 ),
}
for k, v in state_dict.items():
_a = filename
param_count += v.numel()
torch.save(UpperCamelCase , os.path.join(UpperCamelCase , UpperCamelCase ) )
# Write configs
_a = {'''total_size''': param_count * 2}
write_json(UpperCamelCase , os.path.join(UpperCamelCase , '''pytorch_model.bin.index.json''' ) )
_a = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
_a = params['''multiple_of'''] if '''multiple_of''' in params else 256
_a = LlamaConfig(
hidden_size=UpperCamelCase , intermediate_size=compute_intermediate_size(UpperCamelCase , UpperCamelCase , UpperCamelCase ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=UpperCamelCase , )
config.save_pretrained(UpperCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
_a = LlamaForCausalLM.from_pretrained(UpperCamelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=UpperCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(UpperCamelCase , safe_serialization=UpperCamelCase )
shutil.rmtree(UpperCamelCase )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model.
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 377 | 0 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
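# get_signature() collapses a model's parameters into one scalar so the tests can
# cheaply decide whether two sets of weights match (signatures within 1e-3 of each
# other) or differ, e.g. after save_state/load_state round trips.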
class AcceleratorTester(AccelerateTestCase):
@require_cuda
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
a__ = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(UpperCAmelCase_ ):
a__ = Accelerator(cpu=UpperCAmelCase_ )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
a__ = Accelerator()
a__ = GradientState()
assert state.num_steps == 1
a__ = 4
assert state.num_steps == 4
assert state.sync_gradients is True
a__ = False
assert state.sync_gradients is False
GradientState._reset_state()
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
a__ = Accelerator()
a__ = create_components()
accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def lowercase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
pass
with patch('torch.cuda.set_device' , UpperCAmelCase_ ), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64' ):
a__ = Accelerator()
self.assertEqual(str(accelerator.state.device ) , 'cuda:64' )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
a__ = Accelerator()
a__ = create_components()
accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
a__ = get_signature(UpperCAmelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCAmelCase_ )
# make sure random weights don't match
load_random_weights(UpperCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase_ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(UpperCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase_ ) ) < 1e-3 )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
a__ = Accelerator()
a__ = create_components()
accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
a__ = get_signature(UpperCAmelCase_ )
        # saving hook
        def save_config(models, weights, output_dir):
            config = {"class_name": models[0].__class__.__name__}
            with open(os.path.join(output_dir, "data.json"), "w") as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, "data.json"), "r") as f:
                config = json.load(f)
            models[0].class_name = config["class_name"]

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCAmelCase_ )
# make sure random weights don't match with hooks
load_random_weights(UpperCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
a__ = "random"
# make sure loaded weights match with hooks
accelerator.load_state(UpperCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase_ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(UpperCAmelCase_ )
# make sure random weights don't match with hooks removed
load_random_weights(UpperCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase_ ) ) > 1e-3 )
# random class name to verify correct one is loaded
a__ = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(UpperCAmelCase_ )
self.assertTrue(abs(model_signature - get_signature(UpperCAmelCase_ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def lowercase__ ( self ) -> List[str]:
"""simple docstring"""
a__ = Accelerator()
a__ = create_components()
a__ = None
# This should work
a__ = accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
self.assertTrue(dummy_obj is None )
def lowercase__ ( self ) -> str:
"""simple docstring"""
a__ = Accelerator()
a__ = create_components()
a__ = [1, 2, 3]
# This should work
a__ = accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual(
getattr(UpperCAmelCase_ , '_is_accelerate_prepared' , UpperCAmelCase_ ) , UpperCAmelCase_ , 'Dummy object should have `_is_accelerate_prepared` set to `True`' , )
self.assertEqual(
getattr(UpperCAmelCase_ , '_is_accelerate_prepared' , UpperCAmelCase_ ) , UpperCAmelCase_ , 'Model is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(UpperCAmelCase_ , '_is_accelerate_prepared' , UpperCAmelCase_ ) , UpperCAmelCase_ , 'Optimizer is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(UpperCAmelCase_ , '_is_accelerate_prepared' , UpperCAmelCase_ ) , UpperCAmelCase_ , 'Scheduler is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(UpperCAmelCase_ , '_is_accelerate_prepared' , UpperCAmelCase_ ) , UpperCAmelCase_ , 'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(UpperCAmelCase_ , '_is_accelerate_prepared' , UpperCAmelCase_ ) , UpperCAmelCase_ , 'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
@slow
@require_bnb
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
from transformers import AutoModelForCausalLM
a__ = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=UpperCAmelCase_ , device_map={'': 0} , )
a__ = Accelerator()
# This should work
a__ = accelerator.prepare(UpperCAmelCase_ )
@slow
@require_bnb
def lowercase__ ( self ) -> List[Any]:
"""simple docstring"""
from transformers import AutoModelForCausalLM
a__ = Accelerator()
with init_empty_weights():
a__ = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
a__ = infer_auto_device_map(UpperCAmelCase_ )
a__ = "cpu"
a__ = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , device_map=UpperCAmelCase_ , load_in_abit=UpperCAmelCase_ , llm_inta_enable_fpaa_cpu_offload=UpperCAmelCase_ )
# This should not work and get value error
with self.assertRaises(UpperCAmelCase_ ):
a__ = accelerator.prepare(UpperCAmelCase_ )
@slow
@require_bnb
@require_multi_gpu
def lowercase__ ( self ) -> int:
"""simple docstring"""
from transformers import AutoModelForCausalLM
a__ = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
a__ = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
a__ = infer_auto_device_map(UpperCAmelCase_ )
a__ = 1
a__ = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=UpperCAmelCase_ , device_map=UpperCAmelCase_ , )
a__ = Accelerator()
# This should not work and get value error
with self.assertRaises(UpperCAmelCase_ ):
a__ = accelerator.prepare(UpperCAmelCase_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
from transformers import AutoModelForCausalLM
with init_empty_weights():
a__ = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
a__ = infer_auto_device_map(UpperCAmelCase_ )
a__ = 1
a__ = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=UpperCAmelCase_ , device_map=UpperCAmelCase_ , )
a__ = Accelerator()
# This should work
a__ = accelerator.prepare(UpperCAmelCase_ )
@require_cuda
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
a__ = torch.nn.Linear(1_0 , 1_0 )
a__ = torch.optim.SGD(model.parameters() , lr=0.01 )
a__ = Accelerator(cpu=UpperCAmelCase_ )
a__ = accelerator.prepare(UpperCAmelCase_ )
| 273 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        pass
| 62 | 0 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]
    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 721 |
'''simple docstring'''
import socket
def main():
    """Receive a file from a server over a TCP socket and save it locally."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
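# Note: this client assumes a matching server is already listening on port 12312
# on the same machine; the server is expected to send the file bytes and then
# close its end of the connection, which terminates the recv() loop above.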
| 508 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
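# The "qkv" branch above splits a fused query/key/value projection into the three
# separate projections the Hugging Face Swin layout uses: rows [0, dim) become the
# query, rows [dim, 2*dim) the key, and the last dim rows the value.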
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCAmelCase = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 466 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
def __a ( self : Tuple ):
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def __a ( self : Dict ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __a ( self : List[str] ):
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def __a ( self : str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def __a ( self : Optional[Any] ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def __a ( self : Tuple ):
super().test_save_load_local(expected_max_difference=5E-4 )
def __a ( self : List[Any] ):
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
@classmethod
def __a ( cls : Union[str, Any] ):
super().setUpClass()
torch.use_deterministic_algorithms(lowerCamelCase )
@classmethod
def __a ( cls : List[Any] ):
super().tearDownClass()
torch.use_deterministic_algorithms(lowerCamelCase )
def __a ( self : List[str] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : str ):
lowerCamelCase_ : Optional[Any] = torch.manual_seed(51 )
lowerCamelCase_ : Any = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=lowerCamelCase , torch_dtype=torch.floataa )
pipe.to('cuda' )
lowerCamelCase_ : List[Any] = 'a painting of an elephant with glasses'
lowerCamelCase_ : Tuple = [5, 7]
lowerCamelCase_ : List[str] = pipe(
prompt=lowerCamelCase , token_indices=lowerCamelCase , guidance_scale=7.5 , generator=lowerCamelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
lowerCamelCase_ : Optional[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 364 | 0 |
"""simple docstring"""
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort a list of integers in place with pigeonhole sort and return it."""
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase : Optional[Any] = input('Enter numbers separated by comma:\n')
_UpperCamelCase : Any = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 134 |
"""simple docstring"""
import argparse
import torch
from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak model's weights to transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
_UpperCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
_UpperCamelCase : Optional[int] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 134 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        # Binarize the logits: everything above 0 belongs to the label's mask.
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
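# Usage sketch (hypothetical; assumes the transformers agents/tools extras are installed):
# tool = ImageSegmentationTool()
# mask = tool(image, "cat")  # returns a binary PIL.Image mask for the "cat" label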
| 62 |
"""simple docstring"""
def stooge_sort(arr):
    """Sort a list in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    if i >= h:
        return
    # If the first element is larger than the last, swap them.
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
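# Complexity note: stooge sort satisfies T(n) = 3 T(2n/3) + O(1), which solves to
# O(n ** (log 3 / log 1.5)), roughly O(n ** 2.71). It is slower than even bubble
# sort and is included purely for educational interest.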
if __name__ == "__main__":
A__ : Dict = input('Enter numbers separated by a comma:\n').strip()
A__ : Optional[Any] = [int(item) for item in user_input.split(',')]
print(stooge_sort(unsorted))
| 353 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
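# Design note: swapping the module object in sys.modules for a _LazyModule defers
# the heavy torch/vision imports until one of the names in _import_structure is
# actually accessed, which keeps plain `import transformers` fast.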
| 713 | '''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)
OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
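# Worked example: gold "the cat sat" vs. prediction "cat sat down". After
# normalization the article "the" is stripped, so the gold tokens are
# ["cat", "sat"] and the prediction tokens are ["cat", "sat", "down"]:
# precision = 2/3, recall = 2/2 = 1.0, F1 = 2 * (2/3 * 1.0) / (2/3 + 1.0) = 0.8.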
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    '''simple docstring'''
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
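# The loop above accumulates average precision as a right-continuous step integral,
# AP = sum_i p_i * (r_i - r_{i-1}), appending a precision/recall point only where
# the no-answer probability changes, i.e. where a real threshold could be placed.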
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    '''simple docstring'''
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_exact.png'''), title='''Precision-Recall curve for Exact Match score''', )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_f1.png'''), title='''Precision-Recall curve for F1 score''', )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, '''pr_oracle.png'''), title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''', )
    merge_eval(main_eval, pr_exact, '''pr_exact''')
    merge_eval(main_eval, pr_fa, '''pr_f1''')
    merge_eval(main_eval, pr_oracle, '''pr_oracle''')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    '''simple docstring'''
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel('''Model probability of no-answer''')
    plt.ylabel('''Proportion of dataset''')
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    '''simple docstring'''
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
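# Note on the sweep above: questions are visited in order of increasing no-answer
# probability; cur_score starts from the count of correctly-abstained no-answer
# questions and is updated as each question flips from "abstain" to "answer", so
# best_thresh is the na_prob cutoff that maximizes the aggregate score.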
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    '''simple docstring'''
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    '''simple docstring'''
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json['''data''']
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, '''HasAns''')
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, '''NoAns''')
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, '''hasAns''')
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, '''noAns''')
    if OPTS.out_file:
        with open(OPTS.out_file, '''w''') as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
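# Typical invocation (hypothetical file names; flag spellings follow the upstream
# evaluate-v2.0.py argument parser defined earlier in this module):
#   python evaluate.py dev-v2.0.json predictions.json \
#       --na-prob-file na_probs.json --out-file eval.json --out-image-dir pr_plots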
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 496 | 0 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_SCREAMING_SNAKE_CASE : Optional[int] = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class Seq2SeqTrainingArguments(TrainingArguments ):
    sortish_sampler: bool = field(default=False , metadata={'help': 'Whether to use SortishSampler or not.'} )
    predict_with_generate: bool = field(
        default=False , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    generation_max_length: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `max_length` value of the model configuration.'
            )
        } , )
    generation_num_beams: Optional[int] = field(
        default=None , metadata={
            'help': (
                'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default '
                'to the `num_beams` value of the model configuration.'
            )
        } , )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.'
        } , )

    def to_dict( self ):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] = v.to_dict()
        return d
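# Minimal usage sketch (assumes the surrounding transformers training setup; the
# output_dir below is a hypothetical path):
# args = Seq2SeqTrainingArguments(
#     output_dir="out",
#     predict_with_generate=True,   # compute ROUGE/BLEU from generated sequences
#     generation_max_length=128,
#     generation_num_beams=4,
# )
# Seq2SeqTrainer(model=model, args=args, ...) then calls model.generate() with
# these settings during evaluation and prediction.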
| 344 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase ):
def __lowercase( self : List[Any] ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
UpperCamelCase__ : Optional[int] = 1
UpperCamelCase__ : List[Any] = 3
UpperCamelCase__ : Optional[int] = (32, 32)
UpperCamelCase__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(__lowerCamelCase )
return image
@property
    def dummy_cond_unet( self ):
torch.manual_seed(0 )
UpperCamelCase__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, )
return model
@property
    def dummy_vae( self ):
torch.manual_seed(0 )
UpperCamelCase__ : str = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, )
return model
@property
    def dummy_text_encoder( self ):
torch.manual_seed(0 )
UpperCamelCase__ : Optional[int] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
return CLIPTextModel(__lowerCamelCase )
@property
    def dummy_extractor( self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )

                def to( self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract
def __lowercase( self : Dict ) -> str:
UpperCamelCase__ : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Optional[Any] = self.dummy_cond_unet
UpperCamelCase__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='''scaled_linear''', clip_sample=__lowerCamelCase, set_alpha_to_one=__lowerCamelCase, )
UpperCamelCase__ : Optional[Any] = self.dummy_vae
UpperCamelCase__ : int = self.dummy_text_encoder
UpperCamelCase__ : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Tuple = StableDiffusionPipeline(
unet=__lowerCamelCase, scheduler=__lowerCamelCase, vae=__lowerCamelCase, text_encoder=__lowerCamelCase, tokenizer=__lowerCamelCase, safety_checker=__lowerCamelCase, feature_extractor=self.dummy_extractor, )
UpperCamelCase__ : Union[str, Any] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__ : Optional[int] = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ : List[str] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase__ : Union[str, Any] = sd_pipe([prompt], generator=__lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : Union[str, Any] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt], generator=__lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=__lowerCamelCase, )[0]
UpperCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : str = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : Dict ) -> Any:
UpperCamelCase__ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ : Dict = self.dummy_cond_unet
UpperCamelCase__ : Optional[int] = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
UpperCamelCase__ : Dict = self.dummy_vae
UpperCamelCase__ : Tuple = self.dummy_text_encoder
UpperCamelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : str = StableDiffusionPipeline(
unet=__lowerCamelCase, scheduler=__lowerCamelCase, vae=__lowerCamelCase, text_encoder=__lowerCamelCase, tokenizer=__lowerCamelCase, safety_checker=__lowerCamelCase, feature_extractor=self.dummy_extractor, )
UpperCamelCase__ : Dict = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__ : int = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ : Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase__ : int = sd_pipe([prompt], generator=__lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' )
UpperCamelCase__ : List[str] = output.images
UpperCamelCase__ : str = torch.Generator(device=__lowerCamelCase ).manual_seed(0 )
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt], generator=__lowerCamelCase, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=__lowerCamelCase, )[0]
UpperCamelCase__ : List[str] = image[0, -3:, -3:, -1]
UpperCamelCase__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase__ : List[Any] = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : Tuple ) -> List[Any]:
UpperCamelCase__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-lms-pipe''', safety_checker=__lowerCamelCase )
assert isinstance(__lowerCamelCase, __lowerCamelCase )
assert isinstance(pipe.scheduler, __lowerCamelCase )
assert pipe.safety_checker is None
UpperCamelCase__ : Tuple = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCamelCase )
UpperCamelCase__ : str = StableDiffusionPipeline.from_pretrained(__lowerCamelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
UpperCamelCase__ : int = pipe('''example prompt''', num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' )
def __lowercase( self : Dict ) -> int:
UpperCamelCase__ : Optional[int] = self.dummy_cond_unet
UpperCamelCase__ : str = PNDMScheduler(skip_prk_steps=__lowerCamelCase )
UpperCamelCase__ : str = self.dummy_vae
UpperCamelCase__ : List[str] = self.dummy_text_encoder
UpperCamelCase__ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
# put models in fp16
UpperCamelCase__ : Dict = unet.half()
UpperCamelCase__ : List[Any] = vae.half()
UpperCamelCase__ : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
UpperCamelCase__ : Optional[int] = StableDiffusionPipeline(
unet=__lowerCamelCase, scheduler=__lowerCamelCase, vae=__lowerCamelCase, text_encoder=__lowerCamelCase, tokenizer=__lowerCamelCase, safety_checker=__lowerCamelCase, feature_extractor=self.dummy_extractor, )
UpperCamelCase__ : Tuple = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__ : Any = '''A painting of a squirrel eating a burger'''
UpperCamelCase__ : Tuple = sd_pipe([prompt], num_inference_steps=2, output_type='''np''' ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase ):
def __lowercase( self : str ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase( self : List[Any] ) -> int:
UpperCamelCase__ : Any = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=__lowerCamelCase )
UpperCamelCase__ : Optional[int] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase__ : Any = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__ : Any = (
'''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'''
''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'''
''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'''
''' children from bahnhof zoo, detailed '''
)
UpperCamelCase__ : Tuple = 40_03_66_03_46
UpperCamelCase__ : List[Any] = 7
# without safety guidance (sld_guidance_scale = 0)
UpperCamelCase__ : Dict = torch.manual_seed(__lowerCamelCase )
UpperCamelCase__ : Any = sd_pipe(
[prompt], generator=__lowerCamelCase, guidance_scale=__lowerCamelCase, num_inference_steps=50, output_type='''np''', width=5_12, height=5_12, sld_guidance_scale=0, )
UpperCamelCase__ : List[Any] = output.images
UpperCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
UpperCamelCase__ : int = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
UpperCamelCase__ : Any = torch.manual_seed(__lowerCamelCase )
UpperCamelCase__ : Any = sd_pipe(
[prompt], generator=__lowerCamelCase, guidance_scale=__lowerCamelCase, num_inference_steps=50, output_type='''np''', width=5_12, height=5_12, sld_guidance_scale=20_00, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
UpperCamelCase__ : Tuple = output.images
UpperCamelCase__ : Dict = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : Optional[Any] ) -> Tuple:
UpperCamelCase__ : Any = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=__lowerCamelCase )
UpperCamelCase__ : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
UpperCamelCase__ : List[str] = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__ : Dict = '''padme amidala taking a bath artwork, safe for work, no nudity'''
UpperCamelCase__ : Any = 27_34_97_17_55
UpperCamelCase__ : List[Any] = 7
UpperCamelCase__ : Dict = torch.manual_seed(__lowerCamelCase )
UpperCamelCase__ : Tuple = sd_pipe(
[prompt], generator=__lowerCamelCase, guidance_scale=__lowerCamelCase, num_inference_steps=50, output_type='''np''', width=5_12, height=5_12, sld_guidance_scale=0, )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : Optional[int] = image[0, -3:, -3:, -1]
UpperCamelCase__ : str = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
UpperCamelCase__ : Optional[Any] = torch.manual_seed(__lowerCamelCase )
UpperCamelCase__ : Optional[int] = sd_pipe(
[prompt], generator=__lowerCamelCase, guidance_scale=__lowerCamelCase, num_inference_steps=50, output_type='''np''', width=5_12, height=5_12, sld_guidance_scale=20_00, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
UpperCamelCase__ : int = output.images
UpperCamelCase__ : str = image[0, -3:, -3:, -1]
UpperCamelCase__ : Any = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowercase( self : Optional[Any] ) -> Dict:
UpperCamelCase__ : List[Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' )
UpperCamelCase__ : str = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase__ : List[Any] = (
'''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'''
''' leyendecker'''
)
UpperCamelCase__ : Any = 10_44_35_52_34
UpperCamelCase__ : List[Any] = 12
UpperCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase )
UpperCamelCase__ : Union[str, Any] = sd_pipe(
[prompt], generator=__lowerCamelCase, guidance_scale=__lowerCamelCase, num_inference_steps=50, output_type='''np''', width=5_12, height=5_12, sld_guidance_scale=0, )
UpperCamelCase__ : List[Any] = output.images
UpperCamelCase__ : Any = image[0, -3:, -3:, -1]
UpperCamelCase__ : int = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
UpperCamelCase__ : Optional[int] = torch.manual_seed(__lowerCamelCase )
UpperCamelCase__ : Optional[Any] = sd_pipe(
[prompt], generator=__lowerCamelCase, guidance_scale=__lowerCamelCase, num_inference_steps=50, output_type='''np''', width=5_12, height=5_12, sld_guidance_scale=20_00, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
UpperCamelCase__ : Dict = output.images
UpperCamelCase__ : Dict = image[0, -3:, -3:, -1]
UpperCamelCase__ : Union[str, Any] = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561] )
assert image.shape == (1, 5_12, 5_12, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
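# Hedged summary (not part of the upstream test): the sld_* kwargs steer Safe
# Latent Diffusion. sld_guidance_scale=0 disables safety guidance entirely, while
# large values combined with sld_warmup_steps / sld_threshold / sld_momentum_scale /
# sld_mom_beta trade image fidelity against suppression of unsafe concepts.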
| 344 | 1 |
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , value_function: UNetaDModel , unet: UNetaDModel , scheduler: DDPMScheduler , env , ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        return (x_in - self.means[key]) / self.stds[key]
    def de_normalize( self , x_in , key ):
        return x_in * self.stds[key] + self.means[key]
    def to_torch( self , x_in ):
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )
    def reset_xa( self , x_in , cond , act_dim ):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_xa(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
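    # Hedged summary of the loop above: each denoising step first nudges the
    # trajectory along the gradient of the learned value function (classifier-style
    # guidance with the posterior std as step size), then applies the scheduler's
    # reverse step and re-pins the first state to the current observation.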
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , '''observations''' )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        xa = randn_tensor(shape , device=self.unet.device )
        x = self.reset_xa(xa , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x, y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
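# Minimal usage sketch (assumptions: a D4RL-style env and a pretrained checkpoint;
# the model id below is illustrative, not guaranteed to exist):
# pipeline = ValueGuidedRLPipeline.from_pretrained(
#     "bglick13/hopper-medium-v2-value-function-hor32", env=env
# )
# obs = env.reset()
# action = pipeline(obs, planning_horizon=32)  # first denormalized action of the
#                                              # highest-value sampled trajectory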
| 705 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
a__ : List[str] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height , width , scale_factor=8 ):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
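# Worked example: with the default scale_factor=8, each latent cell covers
# 8**2 = 64 pixels, so a 500 x 768 request rounds up to ceil(500/64) * 64 = 512
# and ceil(768/64) * 64 = 768, dimensions the MoVQ decoder can reproduce exactly.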
class KandinskyVaaControlnetPipeline(DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , unet: UNetaDConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device(f"cuda:{gpu_id}" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        device = torch.device(f"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(a__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(a__ )
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , hint: torch.FloatTensor , height: int = 512 , width: int = 512 , num_inference_steps: int = 100 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 570 | 0 |
def solution(max_base: int = 10 , max_power: int = 22 ) -> int:
    '''simple docstring'''
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
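# Worked check (Project Euler 63): 16807 = 7**5 has exactly 5 digits, so it is
# counted; 10**n always has n + 1 digits, which is why bases stop below 10.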
if __name__ == "__main__":
    print(F"""{solution(10, 22) = }""")
| 30 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module ):
    '''simple docstring'''
def __init__( self , A_ , A_ , A_ , A_ , A_=1 , A_=False ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE__ = n_token
SCREAMING_SNAKE_CASE__ = d_embed
SCREAMING_SNAKE_CASE__ = d_proj
SCREAMING_SNAKE_CASE__ = cutoffs + [n_token]
SCREAMING_SNAKE_CASE__ = [0] + self.cutoffs
SCREAMING_SNAKE_CASE__ = div_val
SCREAMING_SNAKE_CASE__ = self.cutoffs[0]
SCREAMING_SNAKE_CASE__ = len(self.cutoffs ) - 1
SCREAMING_SNAKE_CASE__ = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.zeros(self.n_clusters ) )
SCREAMING_SNAKE_CASE__ = nn.ModuleList()
SCREAMING_SNAKE_CASE__ = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(A_ , A_ ) ) )
else:
self.out_projs.append(A_ )
self.out_layers.append(nn.Linear(A_ , A_ ) )
else:
for i in range(len(self.cutoffs ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE__ = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(A_ , A_ ) ) )
self.out_layers.append(nn.Linear(A_ , r_idx - l_idx ) )
SCREAMING_SNAKE_CASE__ = keep_order
def lowercase_ ( self , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if proj is None:
SCREAMING_SNAKE_CASE__ = nn.functional.linear(A_ , A_ , bias=A_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
SCREAMING_SNAKE_CASE__ = nn.functional.linear(A_ , proj.t().contiguous() )
SCREAMING_SNAKE_CASE__ = nn.functional.linear(A_ , A_ , bias=A_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def lowercase_ ( self , A_ , A_=None , A_=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
SCREAMING_SNAKE_CASE__ = hidden[..., :-1, :].contiguous()
SCREAMING_SNAKE_CASE__ = labels[..., 1:].contiguous()
SCREAMING_SNAKE_CASE__ = hidden.view(-1 , hidden.size(-1 ) )
SCREAMING_SNAKE_CASE__ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' )
else:
SCREAMING_SNAKE_CASE__ = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE__ = self._compute_logit(A_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
SCREAMING_SNAKE_CASE__ = labels != -1_00
SCREAMING_SNAKE_CASE__ = torch.zeros_like(A_ , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE__ = (
-nn.functional.log_softmax(A_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
SCREAMING_SNAKE_CASE__ = nn.functional.log_softmax(A_ , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE__ = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE__ = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE__ = self.out_layers[i].weight
SCREAMING_SNAKE_CASE__ = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(A_ )
biases.append(A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE__ = self._compute_logit(A_ , A_ , A_ , A_ )
SCREAMING_SNAKE_CASE__ = nn.functional.log_softmax(A_ , dim=1 )
if labels is None:
SCREAMING_SNAKE_CASE__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
SCREAMING_SNAKE_CASE__ = torch.zeros_like(A_ , dtype=hidden.dtype , device=hidden.device )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = [0] + self.cutoffs
for i in range(len(A_ ) - 1 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
SCREAMING_SNAKE_CASE__ = (labels >= l_idx) & (labels < r_idx)
SCREAMING_SNAKE_CASE__ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
SCREAMING_SNAKE_CASE__ = labels.index_select(0 , A_ ) - l_idx
SCREAMING_SNAKE_CASE__ = head_logprob.index_select(0 , A_ )
SCREAMING_SNAKE_CASE__ = hidden.index_select(0 , A_ )
else:
SCREAMING_SNAKE_CASE__ = hidden
if i == 0:
if labels is not None:
SCREAMING_SNAKE_CASE__ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE__ = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE__ = self._compute_logit(A_ , A_ , A_ , A_ )
SCREAMING_SNAKE_CASE__ = nn.functional.log_softmax(A_ , dim=1 )
SCREAMING_SNAKE_CASE__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
SCREAMING_SNAKE_CASE__ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
SCREAMING_SNAKE_CASE__ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
SCREAMING_SNAKE_CASE__ = logprob_i
if labels is not None:
if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order:
out.index_copy_(0 , A_ , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowercase_ ( self , A_ ):
'''simple docstring'''
if self.n_clusters == 0:
SCREAMING_SNAKE_CASE__ = self._compute_logit(A_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(A_ , dim=-1 )
else:
# construct weights and biases
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
SCREAMING_SNAKE_CASE__ = self.out_layers[0].weight[l_idx:r_idx]
SCREAMING_SNAKE_CASE__ = self.out_layers[0].bias[l_idx:r_idx]
else:
SCREAMING_SNAKE_CASE__ = self.out_layers[i].weight
SCREAMING_SNAKE_CASE__ = self.out_layers[i].bias
if i == 0:
SCREAMING_SNAKE_CASE__ = torch.cat([weight_i, self.cluster_weight] , dim=0 )
SCREAMING_SNAKE_CASE__ = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(A_ )
biases.append(A_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = weights[0], biases[0], self.out_projs[0]
SCREAMING_SNAKE_CASE__ = self._compute_logit(A_ , A_ , A_ , A_ )
SCREAMING_SNAKE_CASE__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
SCREAMING_SNAKE_CASE__ = nn.functional.log_softmax(A_ , dim=1 )
SCREAMING_SNAKE_CASE__ = [0] + self.cutoffs
for i in range(len(A_ ) - 1 ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
SCREAMING_SNAKE_CASE__ = head_logprob[:, : self.cutoffs[0]]
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = weights[i], biases[i], self.out_projs[i]
SCREAMING_SNAKE_CASE__ = self._compute_logit(A_ , A_ , A_ , A_ )
SCREAMING_SNAKE_CASE__ = nn.functional.log_softmax(A_ , dim=1 )
SCREAMING_SNAKE_CASE__ = head_logprob[:, -i] + tail_logprob_i
SCREAMING_SNAKE_CASE__ = logprob_i
return out
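# Hedged usage sketch (sizes mirror the WikiText-103 Transformer-XL configuration;
# treat them as illustrative):
# crit = ProjectedAdaptiveLogSoftmax(
#     n_token=267735, d_embed=1024, d_proj=1024,
#     cutoffs=[20000, 40000, 200000], div_val=4,
# )
# nll = crit(hidden_states, labels)  # per-token negative log-likelihoods
# Frequent tokens live in the full-width head; rarer tokens fall into tail
# clusters whose embedding width shrinks by div_val per cluster, which is the
# memory/compute trick behind adaptive softmax.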
| 100 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPixaPixPipelineFastTests(
    PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
        scheduler = PNDMScheduler(skip_prk_steps=True )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        image = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        image = Image.fromarray(np.uint8(image ) ).convert("RGB" )
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
def UpperCAmelCase(self : Dict ) -> Dict:
snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = StableDiffusionInstructPixaPixPipeline(**lowercase_ )
snake_case = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = self.get_dummy_inputs(lowercase_ )
snake_case = sd_pipe(**lowercase_ ).images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase(self : Optional[Any] ) -> Optional[int]:
snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = StableDiffusionInstructPixaPixPipeline(**lowercase_ )
snake_case = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = self.get_dummy_inputs(lowercase_ )
snake_case = "french fries"
snake_case = sd_pipe(**lowercase_ , negative_prompt=lowercase_ )
snake_case = output.images
snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase(self : Optional[Any] ) -> Optional[int]:
snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = StableDiffusionInstructPixaPixPipeline(**lowercase_ )
snake_case = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = self.get_dummy_inputs(lowercase_ )
snake_case = [inputs["prompt"]] * 2
snake_case = np.array(inputs["image"] ).astype(np.floataa ) / 2_55.0
snake_case = torch.from_numpy(lowercase_ ).unsqueeze(0 ).to(lowercase_ )
snake_case = image / 2 + 0.5
snake_case = image.permute(0 , 3 , 1 , 2 )
snake_case = image.repeat(2 , 1 , 1 , 1 )
snake_case = sd_pipe(**lowercase_ ).images
snake_case = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
snake_case = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase(self : str ) -> Union[str, Any]:
snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case = self.get_dummy_components()
snake_case = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" )
snake_case = StableDiffusionInstructPixaPixPipeline(**lowercase_ )
snake_case = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = self.get_dummy_inputs(lowercase_ )
snake_case = sd_pipe(**lowercase_ ).images
snake_case = image[0, -3:, -3:, -1]
        snake_case = [round(x , 4 ) for x in image_slice.flatten().tolist()]
        print(",".join([str(x ) for x in snake_case] ) )
assert image.shape == (1, 3_2, 3_2, 3)
snake_case = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase(self : Dict ) -> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def UpperCAmelCase(self : Union[str, Any] ) -> Union[str, Any]:
snake_case = self.get_dummy_components()
snake_case = StableDiffusionInstructPixaPixPipeline(**lowercase_ )
snake_case = VaeImageProcessor(do_resize=lowercase_ , do_normalize=lowercase_ )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
snake_case = pipe(**self.get_dummy_inputs_by_type(lowercase_ , input_image_type="pt" ) )[0]
snake_case = components["vae"]
snake_case = self.get_dummy_inputs_by_type(lowercase_ , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case = pipe(**lowercase_ )[0]
snake_case = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowercase_ , 1E-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class StableDiffusionInstructPixaPixPipelineSlowTests(unittest.TestCase ):
def UpperCAmelCase(self : Dict ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self , seed=0 ):
        generator = torch.manual_seed(seed )
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
def UpperCAmelCase(self : int ) -> Optional[Any]:
snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case = self.get_inputs()
snake_case = pipe(**lowercase_ ).images
snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase(self : str ) -> List[str]:
snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowercase_ )
snake_case = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case = self.get_inputs()
snake_case = pipe(**lowercase_ ).images
snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase(self : Tuple ) -> Dict:
snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowercase_ )
snake_case = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case = self.get_inputs()
snake_case = pipe(**lowercase_ ).images
snake_case = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def UpperCAmelCase(self : Union[str, Any] ) -> int:
snake_case = 0
        def callback_fn(step: int , timestep: int , latents: torch.FloatTensor ) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 6_4, 6_4)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 6_4, 6_4)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2

        callback_fn.has_been_called = False
snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowercase_ , torch_dtype=torch.floataa )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case = self.get_inputs()
pipe(**lowercase_ , callback=lowercase_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCAmelCase(self : Any ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=lowercase_ , torch_dtype=torch.floataa )
snake_case = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case = self.get_inputs()
snake_case = pipe(**lowercase_ )
snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def UpperCAmelCase(self : Optional[int] ) -> Optional[int]:
snake_case = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case = inputs["image"].resize((5_0_4, 5_0_4) )
snake_case = "timbrooks/instruct-pix2pix"
snake_case = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowercase_ , safety_checker=lowercase_ , )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
snake_case = pipe(**lowercase_ )
snake_case = output.images[0]
snake_case = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
snake_case = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
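# Hedged note on the 504 x 504 case above: InstructPix2Pix only requires edge
# lengths divisible by 8 (the test deliberately resizes to a value that is not a
# multiple of 16 or 32), so it guards against regressions that silently round
# resolutions to coarser multiples.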
| 704 |
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
def UpperCAmelCase(self : Optional[Any] ) -> Dict:
super().setUp()
        vocab_tokens = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def UpperCAmelCase(self : Optional[Any] ) -> Optional[int]:
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts )
        self.assertListEqual(tokens , jieba_tokens )
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_jieba_tokens )
        reconstructed_text = tokenizer.decode(input_jieba_tokens )
        self.assertEqual(reconstructed_text , normalized_text )
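# Hedged note: the expected ids above come from the released openbmb/cpm-ant-10b
# vocabulary (hence the @tooslow marker), not from the tiny vocab written in
# setUp; the leading id corresponds to the prepended tokenizer.bos_token.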
| 294 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : str = Path(lowerCAmelCase ) / """preprocessor_config.json"""
__lowerCAmelCase : Union[str, Any] = Path(lowerCAmelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCAmelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCAmelCase , """w""" ) )
__lowerCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Dict = Path(lowerCAmelCase ) / """preprocessor_config.json"""
__lowerCAmelCase : Dict = Path(lowerCAmelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(lowerCAmelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCAmelCase , """w""" ) )
__lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Union[str, Any] = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__lowerCAmelCase : Dict = Path(lowerCAmelCase ) / """preprocessor_config.json"""
__lowerCAmelCase : List[Any] = Path(lowerCAmelCase ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCAmelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCAmelCase , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__lowerCAmelCase : List[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase ).to_dict()
config_dict.pop("""image_processor_type""" )
__lowerCAmelCase : Dict = CLIPImageProcessor(**lowerCAmelCase )
# save in new folder
model_config.save_pretrained(lowerCAmelCase )
config.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(lowerCAmelCase )
# make sure private variable is not incorrectly saved
__lowerCAmelCase : List[str] = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Any = Path(lowerCAmelCase ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(lowerCAmelCase , """w""" ) , )
__lowerCAmelCase : Any = AutoImageProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , """clip-base is not a local folder and is not a valid model identifier""" ):
__lowerCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained("""clip-base""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCAmelCase : Dict = AutoImageProcessor.from_pretrained(lowerCAmelCase , revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
lowerCAmelCase , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__lowerCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
"""simple docstring"""
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : List[Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase ):
__lowerCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCAmelCase )
__lowerCAmelCase : List[str] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Any = AutoImageProcessor.from_pretrained(lowerCAmelCase , trust_remote_code=lowerCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , lowerCAmelCase )
AutoImageProcessor.register(lowerCAmelCase , lowerCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(lowerCAmelCase ):
AutoImageProcessor.register(lowerCAmelCase , lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase : Dict = Path(lowerCAmelCase ) / """preprocessor_config.json"""
__lowerCAmelCase : Optional[Any] = Path(lowerCAmelCase ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(lowerCAmelCase , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(lowerCAmelCase , """w""" ) )
__lowerCAmelCase : List[Any] = CustomImageProcessor.from_pretrained(lowerCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(lowerCAmelCase )
__lowerCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
"""simple docstring"""
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowerCamelCase : Dict =True
try:
AutoConfig.register("""custom""" , lowerCAmelCase )
AutoImageProcessor.register(lowerCAmelCase , lowerCAmelCase )
# If remote code is not set, the default is to use local
__lowerCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
__lowerCAmelCase : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
__lowerCAmelCase : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=lowerCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(lowerCAmelCase , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
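# The registration pattern the last two tests exercise, boiled down to its core.
# This is a sketch of the public AutoConfig/AutoImageProcessor extension API in
# recent transformers versions; `MyConfig` and `MyImageProcessor` are illustrative
# names, not classes from the test file.
from transformers import AutoConfig, AutoImageProcessor, PretrainedConfig
from transformers.image_processing_utils import BaseImageProcessor


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyImageProcessor(BaseImageProcessor):
    pass


AutoConfig.register("my-model", MyConfig)
AutoImageProcessor.register(MyConfig, MyImageProcessor)
# After this, AutoImageProcessor.from_pretrained(...) resolves checkpoints whose
# config says model_type == "my-model" to MyImageProcessor, just like a built-in
# entry of the mapping.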
| 651 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
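# A natural extension of this diagnostic is to time a large all_reduce and eyeball
# per-rank bandwidth. The sketch below is an optional add-on, not part of the
# original script; the 512 MB tensor size is an arbitrary illustrative choice.
#
# import time
# tensor = torch.randn(128 * 1024 * 1024, device=device)  # ~512 MB of fp32
# dist.barrier()
# start = time.perf_counter()
# dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
# torch.cuda.synchronize(device)
# elapsed = time.perf_counter() - start
# printflock(f"{gpu} all_reduce of 512MB took {elapsed:.3f}s")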
| 651 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)

    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3_293,
83,
10,
4_552,
4_989,
7_986,
678,
10,
5_915,
111,
179_459,
124_850,
4,
6_044,
237,
12,
6,
5,
6,
4,
6_780,
705,
15,
1_388,
44,
378,
10_114,
711,
152,
20,
6,
5,
22_376,
642,
1_221,
15_190,
34_153,
450,
5_608,
959,
1_119,
57_702,
136,
186,
47,
1_098,
29_367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6_044,
237,
6_284,
50_901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase__ : List[str] = {"input_ids": [[0, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [0, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__,  # the expected-encoding dict defined above
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
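# The `fairseq_offset` arithmetic used in test_full_tokenizer above is worth
# spelling out: XLM-R's vocabulary prepends fairseq's special tokens
# (<s>=0, <pad>=1, </s>=2, <unk>=3) ahead of the raw SentencePiece ids, so each
# piece id is shifted by a fixed offset. A hedged sketch, assuming the standard
# offset of 1 used by the slow tokenizer:
FAIRSEQ_OFFSET = 1


def sp_id_to_xlmr_id(sp_id: int) -> int:
    """Map a raw SentencePiece piece id to the corresponding XLM-R vocabulary id."""
    return sp_id + FAIRSEQ_OFFSET


# e.g. the piece "▁This" with SentencePiece id 285 becomes XLM-R id 286, matching
# `value + tokenizer.fairseq_offset` in the test.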
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg and turn the logits into a PIL mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)

        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
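# Hedged usage sketch for the pipeline above. The checkpoint names
# ("CIDAS/clipseg-rd64-refined", "runwayml/stable-diffusion-inpainting") are the
# usual public choices for these components, not identifiers taken from this file.
#
# from diffusers import StableDiffusionInpaintPipeline
# from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
# from PIL import Image
#
# seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
# seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
# sd = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
#
# pipe = TextInpainting(
#     segmentation_model=seg_model, segmentation_processor=seg_processor,
#     vae=sd.vae, text_encoder=sd.text_encoder, tokenizer=sd.tokenizer,
#     unet=sd.unet, scheduler=sd.scheduler,
#     safety_checker=sd.safety_checker, feature_extractor=sd.feature_extractor,
# ).to("cuda")
#
# image = Image.open("room.png").convert("RGB").resize((512, 512))
# result = pipe(prompt="a vase of flowers", image=image, text="the table")
# result.images[0].save("inpainted.png")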
| 81 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mask-weighted mean pooling: pad positions contribute nothing
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
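# Usage sketch for the wrapper above. Building the model from a fresh config gives
# random weights; for real multilingual embeddings a pretrained M-CLIP checkpoint
# from the Hub would be loaded instead (the exact id should be checked there).
#
# from transformers import AutoTokenizer
#
# tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
# model = MultilingualCLIP(MCLIPConfig())
# batch = tokenizer(["a photo of a dog", "ein Foto eines Hundes"], padding=True, return_tensors="pt")
# projected, token_embs = model(batch["input_ids"], batch["attention_mask"])
# # `projected` lives in the CLIP image-embedding space (numDims wide);
# # `token_embs` are the raw per-token transformer states before pooling.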
| 89 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)

    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
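# For reference, the behavior these tests pin down is easiest to see in a few lines
# of direct usage. The checkpoint is the same tiny test model used throughout this
# file; the completions it produces are arbitrary, which is exactly why the tests
# above only check shapes and types via ANY(...).
#
# from transformers import pipeline
#
# unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base")
# unmasker("My name is <mask>", top_k=2)             # two best completions
# unmasker("My name is <mask>", targets=[" Paris"])  # restrict scoring to given targets
# unmasker("A <mask> and a <mask>.")                 # one result list per mask token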
| 371 | 0 |
"""simple docstring"""
from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")

    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"

    with open(file_name, "wb") as fp:
        fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
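# A slightly hardened variant of the same flow, shown as an optional sketch: some
# sites reject the default requests User-Agent, and not every page carries an
# og:image tag, so both cases are worth guarding. The header value is illustrative.
#
# headers = {"User-Agent": "Mozilla/5.0"}
# response = requests.get(url, headers=headers, timeout=10)
# response.raise_for_status()
# soup = BeautifulSoup(response.content, "html.parser")
# meta = soup.find("meta", {"property": "og:image"})
# if meta is None:
#     raise SystemExit("No og:image meta tag found on the page.")
# image_url = meta["content"]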
| 87 |
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test__mul__matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test__add__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test__sub__matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n", str(square_zero_matrix(5))
        )


if __name__ == "__main__":
    unittest.main()
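# The `axpy` test above checks the classic BLAS level-1 operation
# axpy(a, x, y) = a*x + y, computed component-wise. A plain-Python sketch of the
# same operation, independent of the Vector class and shown for illustration only:
def axpy_plain(a: float, x: list, y: list) -> list:
    """Return a*x + y for equal-length numeric lists."""
    assert len(x) == len(y), "axpy needs vectors of equal length"
    return [a * xi + yi for xi, yi in zip(x, y)]


# axpy_plain(2, [1, 2, 3], [1, 0, 1]) == [3, 4, 7], matching test_axpy.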
| 87 | 1 |
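# A hedged illustration of the vector operations exercised in the tests above.
# This is a minimal sketch with plain Python lists, not the Vector class from
# the snippet; the helper names `axpy_lists` and `dot` are hypothetical.
def axpy_lists(alpha, x, y):
    # axpy computes alpha * x + y component-wise.
    return [alpha * xi + yi for xi, yi in zip(x, y)]

def dot(x, y):
    return sum(xi * yi for xi, yi in zip(x, y))

assert axpy_lists(2, [1, 2, 3], [1, 0, 1]) == [3, 4, 7]
assert dot([2, -1, 4], [1, -2, -1]) == 0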
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
claim_vector = [8, 5, 9, 7]
allocated_resources_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation())

    def __need(self) -> list[list[int]]:
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number])
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources]))
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n")
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector))
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources()))
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 |
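# A compact sketch of the safety check at the core of the Banker's algorithm
# above, assuming numpy arrays; `is_safe_state` is a hypothetical helper name
# and the example data is made up.
import numpy as np

def is_safe_state(available, allocated, maximum):
    need = np.array(maximum) - np.array(allocated)
    work = np.array(available, dtype=int)
    finished = [False] * len(allocated)
    progressed = True
    while progressed:
        progressed = False
        for i, done in enumerate(finished):
            if not done and all(need[i] <= work):
                work = work + np.array(allocated[i])  # process i finishes, releasing its resources
                finished[i] = True
                progressed = True
    return all(finished)

print(is_safe_state([3, 3, 2], [[0, 1, 0], [2, 0, 0], [2, 1, 1]], [[7, 5, 3], [3, 2, 2], [2, 2, 2]]))  # True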
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
| 588 | 0 |
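# A small usage sketch for the array-to-PIL conversion above: build a random
# float batch in [0, 1], scale to uint8, and wrap each image with PIL.
import numpy as np
from PIL import Image

batch = np.random.rand(2, 64, 64, 3)        # NHWC float images in [0, 1]
as_uint8 = (batch * 255).round().astype("uint8")
pil_images = [Image.fromarray(img) for img in as_uint8]
print(len(pil_images), pil_images[0].size)  # 2 (64, 64)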
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'{solution() = }')
| 491 |
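# The closed-form test used above: P(n) = n(3n - 1)/2 is pentagonal iff
# (1 + sqrt(1 + 24 * P)) / 6 is an integer. A quick sanity check:
def pentagonal(n: int) -> int:
    return n * (3 * n - 1) // 2

def looks_pentagonal(x: int) -> bool:
    root = (1 + 24 * x) ** 0.5
    return ((1 + root) / 6) % 1 == 0

assert all(looks_pentagonal(pentagonal(n)) for n in range(1, 100))
assert not looks_pentagonal(6)  # the sequence starts 1, 5, 12, 22, ... so 6 is not pentagonal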
"""simple docstring"""
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    octets = [int(i) for i in ip_va_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 491 | 1 |
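# For production code, the standard library already covers this check; the
# snippet above is the hand-rolled version. A stricter stdlib equivalent:
import ipaddress

def is_valid_ipv4(address: str) -> bool:
    try:
        ipaddress.IPv4Address(address)
        return True
    except ValueError:
        return False

print(is_valid_ipv4("192.168.0.1"), is_valid_ipv4("256.1.1.1"))  # True False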
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )


@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
| 506 |
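# A minimal, self-contained sketch of the same torch.onnx.export call on a
# tiny module, assuming only torch is installed; the module, file name, and
# axis labels here are made up for illustration.
import torch

class TinyDecoder(torch.nn.Module):
    def forward(self, latent):
        return latent * 2.0

torch.onnx.export(
    TinyDecoder(),
    (torch.randn(1, 4, 25, 25),),
    "tiny_decoder.onnx",
    input_names=["latent_sample"],
    output_names=["sample"],
    dynamic_axes={"latent_sample": {0: "batch", 2: "height", 3: "width"}},
    opset_version=14,
)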
"""simple docstring"""
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite")(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed")(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensors = tensor[None].clone().to(state.device)
    tensors = gather(tensors).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env, )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:"))),
        ], timeout=timeout, )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo))
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}")
    return result
class SubprocessCallException(Exception):
    pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}") from e
| 506 | 1 |
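# The pattern behind the skip decorators above, reduced to its essentials:
# wrap unittest.skipUnless around a condition computed at import time. This
# is a hedged sketch with a made-up flag name, not the accelerate API.
import os
import unittest

_RUN_NETWORK_TESTS = os.environ.get("RUN_NETWORK_TESTS", "0") == "1"

def require_network(test_case):
    return unittest.skipUnless(_RUN_NETWORK_TESTS, "test requires RUN_NETWORK_TESTS=1")(test_case)

class Example(unittest.TestCase):
    @require_network
    def test_download(self):
        self.assertTrue(True)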
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 714 |
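# A brute-force cross-check of the block-filling count above for the known
# Project Euler 114/115 example: a row of 7 cells with minimum block length 3
# admits exactly 17 fillings.
from itertools import product

def count_fillings(n: int, min_block: int) -> int:
    valid = 0
    for cells in product((0, 1), repeat=n):
        runs = [len(r) for r in "".join(map(str, cells)).split("0") if r]
        if all(r >= min_block for r in runs):
            valid += 1
    return valid

print(count_fillings(7, 3))  # 17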
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
| 527 | 0 |
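# The dummy-object pattern above in miniature: a metaclass that makes both
# the class and its instances fail loudly when an optional backend is absent.
# This is an illustrative sketch, not the transformers implementation.
class MissingBackend(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the 'speech' backend to be installed.")

class FeatureExtractorStub(metaclass=MissingBackend):
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the 'speech' backend to be installed.")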
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 271 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 271 | 1 |
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 700 |
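# Running Kosaraju's algorithm above on the first sample graph groups the
# cycle 0 -> 2 -> 1 -> 0 into one component; component ordering may vary.
components = strongly_connected_components(test_graph_1)
print(components)  # e.g. [[0, 1, 2], [3], [4]] up to ordering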
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
@require_cuda
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(lowercase__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator(cpu=lowercase__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Accelerator()
SCREAMING_SNAKE_CASE_ : Any = GradientState()
assert state.num_steps == 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 4
assert state.num_steps == 4
assert state.sync_gradients is True
SCREAMING_SNAKE_CASE_ : Optional[int] = False
assert state.sync_gradients is False
GradientState._reset_state()
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = create_components()
(
(
SCREAMING_SNAKE_CASE_
), (
SCREAMING_SNAKE_CASE_
), (
SCREAMING_SNAKE_CASE_
), (
SCREAMING_SNAKE_CASE_
), (
SCREAMING_SNAKE_CASE_
),
) : Optional[Any] = accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : str = create_components()
accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __lowerCamelCase ( self ):
"""simple docstring"""
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*lowercase__ , **lowercase__ ):
pass
with patch("torch.cuda.set_device" , lowercase__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
SCREAMING_SNAKE_CASE_ : List[str] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = create_components()
accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = get_signature(lowercase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase__ )
# make sure random weights don't match
load_random_weights(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = create_components()
accelerator.prepare(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_signature(lowercase__ )
# saving hook
def save_config(lowercase__ , lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"class_name": models[0].__class__.__name__}
with open(os.path.join(lowercase__ , "data.json" ) , "w" ) as f:
json.dump(lowercase__ , lowercase__ )
# loading hook
def load_config(lowercase__ , lowercase__ ):
with open(os.path.join(lowercase__ , "data.json" ) , "r" ) as f:
SCREAMING_SNAKE_CASE_ : Any = json.load(lowercase__ )
SCREAMING_SNAKE_CASE_ : List[str] = config["class_name"]
SCREAMING_SNAKE_CASE_ : Dict = accelerator.register_save_state_pre_hook(lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = accelerator.register_load_state_pre_hook(lowercase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase__ )
# make sure random weights don't match with hooks
load_random_weights(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "random"
# make sure loaded weights match with hooks
accelerator.load_state(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(lowercase__ )
# make sure random weights don't match with hooks removed
load_random_weights(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
SCREAMING_SNAKE_CASE_ : Tuple = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(lowercase__ )
self.assertTrue(abs(model_signature - get_signature(lowercase__ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Tuple = create_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
# This should work
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
self.assertTrue(dummy_obj is None )
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = Accelerator()
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Dict = create_components()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [1, 2, 3]
# This should work
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(lowercase__ , "_is_accelerate_prepared" , lowercase__ ) , lowercase__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def __lowerCamelCase ( self ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map={"": 0} , )
SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
# This should work
SCREAMING_SNAKE_CASE_ : List[Any] = accelerator.prepare(lowercase__ )
@slow
@require_bnb
def __lowerCamelCase ( self ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator()
with init_empty_weights():
SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
SCREAMING_SNAKE_CASE_ : Optional[Any] = infer_auto_device_map(lowercase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = "cpu"
SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=lowercase__ , load_in_abit=lowercase__ , llm_inta_enable_fpaa_cpu_offload=lowercase__ )
# This should not work and get value error
with self.assertRaises(lowercase__ ):
SCREAMING_SNAKE_CASE_ : str = accelerator.prepare(lowercase__ )
@slow
@require_bnb
@require_multi_gpu
def __lowerCamelCase ( self ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
SCREAMING_SNAKE_CASE_ : str = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
SCREAMING_SNAKE_CASE_ : str = infer_auto_device_map(lowercase__ )
SCREAMING_SNAKE_CASE_ : Dict = 1
SCREAMING_SNAKE_CASE_ : str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map=lowercase__ , )
SCREAMING_SNAKE_CASE_ : Any = Accelerator()
# This should not work and get value error
with self.assertRaises(lowercase__ ):
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.prepare(lowercase__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __lowerCamelCase ( self ):
"""simple docstring"""
from transformers import AutoModelForCausalLM
with init_empty_weights():
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = infer_auto_device_map(lowercase__ )
SCREAMING_SNAKE_CASE_ : List[str] = 1
SCREAMING_SNAKE_CASE_ : str = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=lowercase__ , device_map=lowercase__ , )
SCREAMING_SNAKE_CASE_ : Any = Accelerator()
# This should work
SCREAMING_SNAKE_CASE_ : Optional[int] = accelerator.prepare(lowercase__ )
@require_cuda
def __lowerCamelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = torch.nn.Linear(10 , 10 )
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.optim.SGD(model.parameters() , lr=0.01 )
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator(cpu=lowercase__ )
SCREAMING_SNAKE_CASE_ : Dict = accelerator.prepare(lowercase__ )
| 68 | 0 |
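# The prepare/free_memory round trip exercised in the tests above, as a
# minimal standalone sketch (assumes `accelerate` and `torch` are installed):
import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.free_memory()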
def find_min(arr):
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = True
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
return diff
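
# Example: for [1, 6, 11, 5] the closest split is {1, 5, 6} against {11},
# so the minimum partition difference find_min returns is 1.
print(find_min([1, 6, 11, 5]))  # 1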
| 298 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'bridgetower_vision_model'
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_channels=3, patch_size=16, image_size=288, initializer_factor=1, layer_norm_eps=1e-05, stop_gradient=False, share_layernorm=True, remove_last_layer=False, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'bridgetower_text_model'
    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, initializer_factor=1, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class _lowercase ( _UpperCAmelCase ):
"""simple docstring"""
lowerCAmelCase__ = 'bridgetower'
    def __init__(self, share_cross_modal_transformer_layers=True, hidden_act="gelu", hidden_size=768, initializer_factor=1, layer_norm_eps=1e-05, share_link_tower_layers=False, link_tower_type="add", num_attention_heads=12, num_hidden_layers=6, tie_word_embeddings=False, init_layernorm_from_vision_encoder=False, text_config=None, vision_config=None, **kwargs):
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)
@classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 398 | 0 |
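# The composite-config pattern above in brief: a top-level config owns typed
# sub-configs and re-expands them in to_dict(). A hedged, framework-free sketch
# with made-up class names:
class SubConfig:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return dict(self.__dict__)

class CompositeConfig:
    def __init__(self, text_config=None, vision_config=None):
        self.text_config = SubConfig(**(text_config or {}))
        self.vision_config = SubConfig(**(vision_config or {}))

    def to_dict(self):
        return {
            "text_config": self.text_config.to_dict(),
            "vision_config": self.vision_config.to_dict(),
        }

print(CompositeConfig(text_config={"hidden_size": 512}).to_dict())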
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class _lowerCamelCase (lowerCamelCase ):
lowercase__ = """transfo-xl"""
lowercase__ = ["""mems"""]
lowercase__ = {
"""n_token""": """vocab_size""",
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(self, vocab_size=267_735, cutoffs=[20_000, 40_000, 200_000], d_model=1_024, d_embed=1_024, n_head=16, d_head=64, d_inner=4_096, div_val=4, pre_lnorm=False, n_layer=18, mem_len=1_600, clamp_len=1_000, same_length=True, proj_share_all_but_first=True, attn_type=0, sample_softmax=-1, adaptive=True, dropout=0.1, dropatt=0.0, untie_r=True, init="normal", init_range=0.01, proj_init_std=0.01, init_std=0.02, layer_norm_epsilon=1e-5, eos_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
@property
    def max_position_embeddings(self):
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings(self, value):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 345 |
from __future__ import annotations
from typing import Any
def evaluate(postfix_notation: list) -> int:
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | 1 |
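# Example: the postfix expression "2 1 + 3 *" evaluates to (2 + 1) * 3 = 9.
print(evaluate(["2", "1", "+", "3", "*"]))  # 9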
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_ta(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
_lowercase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowercase : str =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 364 |
'''simple docstring'''
from __future__ import annotations
class IIRFilter:
    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}")
        if len(b_coeffs) != self.order + 1:
            raise ValueError(
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}")
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 679 | 0 |
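# A concrete instance of the difference equation implemented above: a
# first-order low-pass (exponential smoothing) filter,
# y[n] = alpha * x[n] + (1 - alpha) * y[n - 1],
# i.e. a = [1.0, -(1 - alpha)] and b = [alpha]. A minimal standalone sketch:
def lowpass(samples, alpha=0.2):
    y = 0.0
    out = []
    for x in samples:
        y = alpha * x + (1 - alpha) * y
        out.append(y)
    return out

print(lowpass([1.0] * 5))  # ramps toward 1.0: 0.2, 0.36, 0.488, ...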
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int):
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99 |
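# A small run of the Markov chain above; the walk itself is random, so only
# the total count is deterministic: each node starts at 1 in the Counter and
# 100 steps add 100 more visits.
transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
counts = get_transitions("a", transitions, 100)
print(sum(counts.values()))  # 102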
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
    deprecated_args = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1", metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        }, )
@cached_property
    def _setup_devices(self):
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu
    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 99 | 1 |
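# The dataclass-with-metadata pattern above in miniature: `field(metadata=...)`
# is how argument-parsing helpers attach help text to a typed option. A hedged
# sketch with a made-up class name:
from dataclasses import dataclass, field

@dataclass
class ExampleArgs:
    fp16_opt_level: str = field(default="O1", metadata={"help": "Apex AMP level: O0-O3."})

print(ExampleArgs().fp16_opt_level)  # O1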
def bubble_sort(list_data: list, length: int = 0) -> list:
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 87 |
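# Example run of the recursive bubble sort above: each pass bubbles the
# largest remaining element to the end, and the `swapped` flag stops the
# recursion early on an already-sorted list.
print(bubble_sort([5, 2, 9, 1]))  # [1, 2, 5, 9]
print(bubble_sort([1, 2, 3]))     # already sorted: returns after a single pass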
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__UpperCAmelCase = {"""configuration_gpt_neox""": ["""GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXConfig"""]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""GPTNeoXTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
"""GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXForCausalLM""",
"""GPTNeoXForQuestionAnswering""",
"""GPTNeoXForSequenceClassification""",
"""GPTNeoXForTokenClassification""",
"""GPTNeoXLayer""",
"""GPTNeoXModel""",
"""GPTNeoXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
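# Hedged illustration of the lazy-import pattern above (usage path assumed):
# import transformers
# config = transformers.GPTNeoXConfig()  # attribute access resolves the import via _LazyModule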
| 379 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
snake_case_ : Optional[Any] = logging.get_logger(__name__)
def lowercase_ ( _lowercase : Tuple ):
'''simple docstring'''
UpperCAmelCase : List[Any] = OrderedDict()
for key, value in state_dict.items():
if key.startswith("module.encoder" ):
UpperCAmelCase : List[str] = key.replace("module.encoder" , "glpn.encoder" )
if key.startswith("module.decoder" ):
UpperCAmelCase : int = key.replace("module.decoder" , "decoder.stages" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCAmelCase : List[Any] = key[key.find("patch_embed" ) + len("patch_embed" )]
UpperCAmelCase : Optional[int] = key.replace(F"""patch_embed{idx}""" , F"""patch_embeddings.{int(_lowercase )-1}""" )
if "norm" in key:
UpperCAmelCase : List[Any] = key.replace("norm" , "layer_norm" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCAmelCase : Dict = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
UpperCAmelCase : str = key.replace(F"""layer_norm{idx}""" , F"""layer_norm.{int(_lowercase )-1}""" )
if "layer_norm1" in key:
UpperCAmelCase : Optional[Any] = key.replace("layer_norm1" , "layer_norm_1" )
if "layer_norm2" in key:
UpperCAmelCase : Optional[Any] = key.replace("layer_norm2" , "layer_norm_2" )
if "block" in key:
# replace for example block1 by block.0
UpperCAmelCase : Tuple = key[key.find("block" ) + len("block" )]
UpperCAmelCase : Optional[int] = key.replace(F"""block{idx}""" , F"""block.{int(_lowercase )-1}""" )
if "attn.q" in key:
UpperCAmelCase : List[Any] = key.replace("attn.q" , "attention.self.query" )
if "attn.proj" in key:
UpperCAmelCase : Union[str, Any] = key.replace("attn.proj" , "attention.output.dense" )
if "attn" in key:
UpperCAmelCase : Optional[Any] = key.replace("attn" , "attention.self" )
if "fc1" in key:
UpperCAmelCase : List[str] = key.replace("fc1" , "dense1" )
if "fc2" in key:
UpperCAmelCase : Tuple = key.replace("fc2" , "dense2" )
if "linear_pred" in key:
UpperCAmelCase : str = key.replace("linear_pred" , "classifier" )
if "linear_fuse" in key:
UpperCAmelCase : Union[str, Any] = key.replace("linear_fuse.conv" , "linear_fuse" )
UpperCAmelCase : Tuple = key.replace("linear_fuse.bn" , "batch_norm" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCAmelCase : Any = key[key.find("linear_c" ) + len("linear_c" )]
UpperCAmelCase : List[Any] = key.replace(F"""linear_c{idx}""" , F"""linear_c.{int(_lowercase )-1}""" )
if "bot_conv" in key:
UpperCAmelCase : Any = key.replace("bot_conv" , "0.convolution" )
if "skip_conv1" in key:
UpperCAmelCase : int = key.replace("skip_conv1" , "1.convolution" )
if "skip_conv2" in key:
UpperCAmelCase : Any = key.replace("skip_conv2" , "2.convolution" )
if "fusion1" in key:
UpperCAmelCase : Union[str, Any] = key.replace("fusion1" , "1.fusion" )
if "fusion2" in key:
UpperCAmelCase : str = key.replace("fusion2" , "2.fusion" )
if "fusion3" in key:
UpperCAmelCase : List[str] = key.replace("fusion3" , "3.fusion" )
if "fusion" in key and "conv" in key:
UpperCAmelCase : Union[str, Any] = key.replace("conv" , "convolutional_layer" )
if key.startswith("module.last_layer_depth" ):
UpperCAmelCase : Dict = key.replace("module.last_layer_depth" , "head.head" )
UpperCAmelCase : Tuple = value
return new_state_dict
def lowercase_ ( _lowercase : Optional[int] , _lowercase : Optional[Any] ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCAmelCase : List[str] = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
UpperCAmelCase : Optional[int] = state_dict.pop(F"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
UpperCAmelCase : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
UpperCAmelCase : Optional[Any] = kv_bias[: config.hidden_sizes[i]]
UpperCAmelCase : Dict = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCAmelCase : List[Any] = kv_bias[config.hidden_sizes[i] :]
def lowercase_ ( ):
'''simple docstring'''
UpperCAmelCase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return image
@torch.no_grad()
def lowercase_ ( _lowercase : List[Any] , _lowercase : Tuple , _lowercase : List[Any]=False , _lowercase : str=None ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
UpperCAmelCase : Dict = GLPNImageProcessor()
# prepare image
UpperCAmelCase : Union[str, Any] = prepare_img()
UpperCAmelCase : str = image_processor(images=_lowercase , return_tensors="pt" ).pixel_values
logger.info("Converting model..." )
# load original state dict
UpperCAmelCase : Union[str, Any] = torch.load(_lowercase , map_location=torch.device("cpu" ) )
# rename keys
UpperCAmelCase : Optional[int] = rename_keys(_lowercase )
# key and value matrices need special treatment
read_in_k_v(_lowercase , _lowercase )
# create HuggingFace model and load state dict
UpperCAmelCase : Tuple = GLPNForDepthEstimation(_lowercase )
model.load_state_dict(_lowercase )
model.eval()
# forward pass
UpperCAmelCase : List[Any] = model(_lowercase )
UpperCAmelCase : Union[str, Any] = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
UpperCAmelCase : int = torch.tensor(
[[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] )
elif "kitti" in model_name:
UpperCAmelCase : Any = torch.tensor(
[[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] )
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
UpperCAmelCase : List[str] = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , _lowercase , atol=1E-4 )
print("Looks ok!" )
# finally, push to hub if required
if push_to_hub:
logger.info("Pushing model and image processor to the hub..." )
model.push_to_hub(
repo_path_or_name=Path(_lowercase , _lowercase ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=_lowercase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_lowercase , _lowercase ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=_lowercase , )
if __name__ == "__main__":
snake_case_ : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
snake_case_ : Union[str, Any] = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
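# Hedged CLI sketch (the script file name and checkpoint path are assumed; the
# flags are the ones defined by the argparse parser above):
# python convert_glpn_to_pytorch.py \
#     --checkpoint_path ./glpn_kitti.pth \
#     --pytorch_dump_folder_path ./glpn-kitti \
#     --model_name glpn-kitti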
| 292 |
"""simple docstring"""
def is_isogram(string: str) -> bool:
    '''simple docstring'''
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
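    # Illustrative checks (sample words assumed):
    assert is_isogram("Uncopyrightable")
    assert not is_isogram("letter")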
| 292 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase :List[Any] = {
'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Tuple = [
'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Swinv2ForImageClassification',
'Swinv2ForMaskedImageModeling',
'Swinv2Model',
'Swinv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
__lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 222 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__lowerCamelCase :Union[str, Any] = logging.getLogger(__name__)
__lowerCamelCase :str = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__lowerCamelCase :int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class A__ :
"""simple docstring"""
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__lowercase)} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''})
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''})
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class A__ :
"""simple docstring"""
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''The input training data file (a text file).'''})
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
snake_case__ : Optional[str] =field(
default=__lowercase , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
snake_case__ : bool =field(
default=__lowercase , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
snake_case__ : bool =field(
default=__lowercase , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''})
    snake_case__ : bool =field(default=__lowercase , metadata={'''help''': '''Whether or not to use whole word mask.'''})
snake_case__ : float =field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''})
snake_case__ : float =field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
snake_case__ : int =field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''})
snake_case__ : int =field(
default=-1 , metadata={
'''help''': (
            '''Optional input sequence length after tokenization. '''
            '''The training dataset will be truncated in blocks of this size for training. '''
            '''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
snake_case__ : bool =field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
def snake_case ( UpperCamelCase__ : DataTrainingArguments , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[str] = None , ) -> Optional[int]:
def _dataset(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("""You need to set world whole masking and mlm to True for Chinese Whole Word Mask""" )
return LineByLineWithRefDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , ref_path=UpperCamelCase__ , )
return LineByLineTextDataset(tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size )
else:
return TextDataset(
tokenizer=UpperCamelCase__ , file_path=UpperCamelCase__ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCamelCase__ , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(UpperCamelCase__ ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def snake_case ( ) -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase : List[str] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
lowerCamelCase : str = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCamelCase : Tuple = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
lowerCamelCase : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
lowerCamelCase : Tuple = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
lowerCamelCase : Dict = AutoModelWithLMHead.from_config(UpperCamelCase__ )
model.resize_token_embeddings(len(UpperCamelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
lowerCamelCase : Dict = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
lowerCamelCase : List[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
lowerCamelCase : Tuple = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
lowerCamelCase : Optional[Any] = (
get_dataset(UpperCamelCase__ , tokenizer=UpperCamelCase__ , evaluate=UpperCamelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
lowerCamelCase : Optional[int] = DataCollatorForPermutationLanguageModeling(
tokenizer=UpperCamelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
lowerCamelCase : int = DataCollatorForWholeWordMask(
tokenizer=UpperCamelCase__ , mlm_probability=data_args.mlm_probability )
else:
lowerCamelCase : Optional[Any] = DataCollatorForLanguageModeling(
tokenizer=UpperCamelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
lowerCamelCase : List[Any] = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , data_collator=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , prediction_loss_only=UpperCamelCase__ , )
# Training
if training_args.do_train:
lowerCamelCase : Any = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=UpperCamelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase : Any = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCamelCase : Dict = trainer.evaluate()
lowerCamelCase : List[Any] = math.exp(eval_output["""eval_loss"""] )
lowerCamelCase : Any = {"""perplexity""": perplexity}
lowerCamelCase : List[Any] = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(UpperCamelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , UpperCamelCase__ , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(UpperCamelCase__ )
return results
def snake_case ( UpperCamelCase__ : Optional[int] ) -> Optional[int]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
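# Hedged invocation sketch (script name and data paths assumed; the flags come
# from the dataclasses above and from `TrainingArguments`):
# python run_language_modeling.py \
#     --model_name_or_path roberta-base \
#     --train_data_file train.txt \
#     --do_train --mlm \
#     --output_dir ./lm-output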
| 222 | 1 |
'''simple docstring'''
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
| 704 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__snake_case : int = 'examples/'
__snake_case : Dict = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
__snake_case : List[str] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
__snake_case : int = 'README.md'
def _UpperCAmelCase ( _UpperCamelCase : Optional[int], _UpperCamelCase : List[Any], _UpperCamelCase : List[str] ) -> int:
with open(_UpperCamelCase, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
A_ = f.read()
A_ ,A_ = REPLACE_PATTERNS[pattern]
A_ = replace.replace('''VERSION''', _UpperCamelCase )
A_ = re_pattern.sub(_UpperCamelCase, _UpperCamelCase )
with open(_UpperCamelCase, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.write(_UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : Any ) -> int:
for folder, directories, fnames in os.walk(_UpperCamelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(_UpperCamelCase, _UpperCamelCase ), _UpperCamelCase, pattern='''examples''' )
def _UpperCAmelCase ( _UpperCamelCase : Union[str, Any], _UpperCamelCase : str=False ) -> List[str]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
if not patch:
update_version_in_examples(_UpperCamelCase )
def _UpperCAmelCase ( ) -> Dict:
A_ = '''🤗 Transformers currently provides the following architectures'''
A_ = '''1. Want to contribute a new model?'''
with open(_UpperCamelCase, '''r''', encoding='''utf-8''', newline='''\n''' ) as f:
A_ = f.readlines()
# Find the start of the list.
A_ = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
A_ = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
A_ = lines[index].replace(
'''https://huggingface.co/docs/diffusers/main/model_doc''', '''https://huggingface.co/docs/diffusers/model_doc''', )
index += 1
with open(_UpperCamelCase, '''w''', encoding='''utf-8''', newline='''\n''' ) as f:
f.writelines(_UpperCamelCase )
def _UpperCAmelCase ( ) -> List[Any]:
with open(REPLACE_FILES['''init'''], '''r''' ) as f:
A_ = f.read()
A_ = REPLACE_PATTERNS['''init'''][0].search(_UpperCamelCase ).groups()[0]
return packaging.version.parse(_UpperCamelCase )
def _UpperCAmelCase ( _UpperCamelCase : str=False ) -> Union[str, Any]:
A_ = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
A_ = default_version.base_version
elif patch:
A_ = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
A_ = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
A_ = input(F'''Which version are you releasing? [{default_version}]''' )
if len(_UpperCamelCase ) == 0:
A_ = default_version
print(F'''Updating version to {version}.''' )
global_version_update(_UpperCamelCase, patch=_UpperCamelCase )
def _UpperCAmelCase ( ) -> int:
A_ = get_version()
A_ = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
A_ = current_version.base_version
# Check with the user we got that right.
A_ = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(_UpperCamelCase ) == 0:
A_ = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(_UpperCamelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
__snake_case : int = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
__snake_case : Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
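# Hedged usage sketch (script path assumed; flags are the ones parsed above):
# python utils/release.py                  # pre-release: pick the release version
# python utils/release.py --patch          # pre-release for a patch version
# python utils/release.py --post_release   # post-release: move back to a dev version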
| 174 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Tuple = logging.get_logger(__name__)
_UpperCamelCase : int = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class _snake_case ( lowerCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = 'unispeech-sat'
def __init__( self , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE="group" , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _SCREAMING_SNAKE_CASE=(5, 2, 2, 2, 2, 2, 2) , _SCREAMING_SNAKE_CASE=(10, 3, 3, 3, 3, 2, 2) , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1_28 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.05 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=3_20 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1_00 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="mean" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=(5_12, 5_12, 5_12, 5_12, 15_00) , _SCREAMING_SNAKE_CASE=(5, 3, 3, 1, 1) , _SCREAMING_SNAKE_CASE=(1, 2, 3, 1, 1) , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5_04 , **_SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
lowerCAmelCase = hidden_size
lowerCAmelCase = feat_extract_norm
lowerCAmelCase = feat_extract_activation
lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = conv_bias
lowerCAmelCase = num_conv_pos_embeddings
lowerCAmelCase = num_conv_pos_embedding_groups
lowerCAmelCase = len(self.conv_dim )
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_dropout
lowerCAmelCase = attention_dropout
lowerCAmelCase = activation_dropout
lowerCAmelCase = feat_proj_dropout
lowerCAmelCase = final_dropout
lowerCAmelCase = layerdrop
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
lowerCAmelCase = vocab_size
lowerCAmelCase = num_clusters
lowerCAmelCase = do_stable_layer_norm
lowerCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCAmelCase = apply_spec_augment
lowerCAmelCase = mask_time_prob
lowerCAmelCase = mask_time_length
lowerCAmelCase = mask_time_min_masks
lowerCAmelCase = mask_feature_prob
lowerCAmelCase = mask_feature_length
lowerCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
lowerCAmelCase = num_codevectors_per_group
lowerCAmelCase = num_codevector_groups
lowerCAmelCase = contrastive_logits_temperature
lowerCAmelCase = feat_quantizer_dropout
lowerCAmelCase = num_negatives
lowerCAmelCase = codevector_dim
lowerCAmelCase = proj_codevector_dim
lowerCAmelCase = diversity_loss_weight
# ctc loss
lowerCAmelCase = ctc_loss_reduction
lowerCAmelCase = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowerCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = list(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = xvector_output_dim
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
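# Hedged sketch of the property above (in `transformers` it is called
# `inputs_to_logits_ratio`; that name is an assumption here): with the default
# conv_stride (5, 2, 2, 2, 2, 2, 2) the product of strides is 5 * 2**6 == 320,
# i.e. roughly 320 input waveform samples per encoder frame.
assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320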
| 284 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : Union[str, Any] = logging.get_logger(__name__)
a_ : Optional[int] = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Optional[Any] ='altclip_text_model'
def __init__( self, lowerCAmelCase=250_002, lowerCAmelCase=1_024, lowerCAmelCase=24, lowerCAmelCase=16, lowerCAmelCase=4_096, lowerCAmelCase="gelu", lowerCAmelCase=0.1, lowerCAmelCase=0.1, lowerCAmelCase=514, lowerCAmelCase=1, lowerCAmelCase=0.0_2, lowerCAmelCase=0.0_2, lowerCAmelCase=1e-05, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase="absolute", lowerCAmelCase=True, lowerCAmelCase=768, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =vocab_size
lowerCamelCase_ =hidden_size
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =hidden_act
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =hidden_dropout_prob
lowerCamelCase_ =attention_probs_dropout_prob
lowerCamelCase_ =max_position_embeddings
lowerCamelCase_ =type_vocab_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =initializer_factor
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =position_embedding_type
lowerCamelCase_ =use_cache
lowerCamelCase_ =project_dim
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Dict ='altclip_vision_model'
def __init__( self, lowerCAmelCase=768, lowerCAmelCase=3_072, lowerCAmelCase=512, lowerCAmelCase=12, lowerCAmelCase=12, lowerCAmelCase=3, lowerCAmelCase=224, lowerCAmelCase=32, lowerCAmelCase="quick_gelu", lowerCAmelCase=1e-5, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, **lowerCAmelCase, ):
"""simple docstring"""
super().__init__(**lowerCAmelCase )
lowerCamelCase_ =hidden_size
lowerCamelCase_ =intermediate_size
lowerCamelCase_ =projection_dim
lowerCamelCase_ =num_hidden_layers
lowerCamelCase_ =num_attention_heads
lowerCamelCase_ =num_channels
lowerCamelCase_ =patch_size
lowerCamelCase_ =image_size
lowerCamelCase_ =initializer_range
lowerCamelCase_ =initializer_factor
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =layer_norm_eps
lowerCamelCase_ =hidden_act
@classmethod
def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase )
lowerCamelCase_, lowerCamelCase_ =cls.get_config_dict(lowerCAmelCase, **lowerCAmelCase )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('''model_type''' ) == "altclip":
lowerCamelCase_ =config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls, '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowerCAmelCase, **lowerCAmelCase )
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Dict ='altclip'
lowercase : str =True
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=768, lowerCAmelCase=2.6_5_9_2, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =kwargs.pop('''text_config_dict''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''vision_config_dict''', lowerCAmelCase )
super().__init__(**lowerCAmelCase )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowerCamelCase_ ={}
# This is the complete result when using `text_config_dict`.
lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowerCamelCase_ =(
f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
f'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowerCamelCase_ =(
f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
f'''value `text_config["{key}"]` will be overriden.'''
)
logger.warning(lowerCAmelCase )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowerCamelCase_ ={}
# This is the complete result when using `vision_config_dict`.
lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowerCamelCase_ ={
str(lowerCAmelCase ): value for key, value in _vision_config_dict['''id2label'''].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowerCamelCase_ =(
f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowerCamelCase_ =(
f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
f'''The value `vision_config["{key}"]` will be overriden.'''
)
logger.warning(lowerCAmelCase )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowerCamelCase_ ={}
logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' )
if vision_config is None:
lowerCamelCase_ ={}
            logger.info('''`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.''' )
lowerCamelCase_ =AltCLIPTextConfig(**lowerCAmelCase )
lowerCamelCase_ =AltCLIPVisionConfig(**lowerCAmelCase )
lowerCamelCase_ =projection_dim
lowerCamelCase_ =logit_scale_init_value
lowerCamelCase_ =1.0
@classmethod
def lowercase__ ( cls, lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **lowerCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =copy.deepcopy(self.__dict__ )
lowerCamelCase_ =self.text_config.to_dict()
lowerCamelCase_ =self.vision_config.to_dict()
lowerCamelCase_ =self.__class__.model_type
return output
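# Hedged usage sketch (class names taken from the log/warning strings above;
# the final classmethod defined earlier follows the `from_text_vision_configs`
# pattern, so that name is assumed):
# text_config = AltCLIPTextConfig()
# vision_config = AltCLIPVisionConfig()
# config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)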
| 676 | 0 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'data2vec-audio'
def __init__( self : List[str] , a : Optional[Any]=32 , a : Optional[int]=768 , a : Optional[int]=12 , a : List[Any]=12 , a : Optional[int]=3_072 , a : List[Any]="gelu" , a : Optional[Any]=0.1 , a : List[Any]=0.1 , a : Optional[int]=0.1 , a : Dict=0.0 , a : Optional[Any]=0.1 , a : List[Any]=0.1 , a : List[str]=0.02 , a : Any=1E-5 , a : Optional[int]="gelu" , a : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , a : List[str]=(5, 2, 2, 2, 2, 2, 2) , a : Dict=(10, 3, 3, 3, 3, 2, 2) , a : str=False , a : int=16 , a : Dict=19 , a : int=5 , a : Optional[Any]=0.05 , a : Tuple=10 , a : int=2 , a : Optional[Any]=0.0 , a : Any=10 , a : Any=0 , a : List[Any]="sum" , a : str=False , a : Dict=False , a : Optional[Any]=256 , a : Optional[Any]=(512, 512, 512, 512, 1_500) , a : Tuple=(5, 3, 3, 1, 1) , a : List[Any]=(1, 2, 3, 1, 1) , a : int=512 , a : List[str]=0 , a : Tuple=1 , a : Dict=2 , a : Dict=False , a : Optional[Any]=3 , a : List[str]=2 , a : Union[str, Any]=3 , a : Optional[Any]=None , **a : Optional[Any] , )-> List[Any]:
"""simple docstring"""
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowercase__ = hidden_size
lowercase__ = feat_extract_activation
lowercase__ = list(a )
lowercase__ = list(a )
lowercase__ = list(a )
lowercase__ = conv_bias
lowercase__ = num_conv_pos_embeddings
lowercase__ = num_conv_pos_embedding_groups
lowercase__ = conv_pos_kernel_size
lowercase__ = len(self.conv_dim )
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = feat_proj_dropout
lowercase__ = final_dropout
lowercase__ = layerdrop
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = vocab_size
lowercase__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
lowercase__ = mask_feature_min_masks
# ctc loss
lowercase__ = ctc_loss_reduction
lowercase__ = ctc_zero_infinity
# adapter
lowercase__ = add_adapter
lowercase__ = adapter_kernel_size
lowercase__ = adapter_stride
lowercase__ = num_adapter_layers
lowercase__ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
lowercase__ = list(a )
lowercase__ = list(a )
lowercase__ = list(a )
lowercase__ = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Union[str, Any]:
"""simple docstring"""
return math.prod(self.conv_stride )
| 45 |
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
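    # Illustrative single conversion (example values assumed):
    assert decimal_to_any(255, 16) == "FF"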
| 45 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__: Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowerCamelCase__ = """markuplm"""
def __init__( self : Optional[int] , __snake_case : List[str]=30522 , __snake_case : int=768 , __snake_case : Optional[int]=12 , __snake_case : int=12 , __snake_case : List[str]=3072 , __snake_case : Union[str, Any]="gelu" , __snake_case : List[Any]=0.1 , __snake_case : Any=0.1 , __snake_case : Any=512 , __snake_case : Union[str, Any]=2 , __snake_case : List[str]=0.02 , __snake_case : Union[str, Any]=1E-12 , __snake_case : str=0 , __snake_case : int=0 , __snake_case : Union[str, Any]=2 , __snake_case : Union[str, Any]=256 , __snake_case : int=1024 , __snake_case : List[str]=216 , __snake_case : Dict=1001 , __snake_case : str=32 , __snake_case : Dict=50 , __snake_case : str="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Optional[Any]=None , **__snake_case : str , ) -> List[Any]:
super().__init__(
pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case , )
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : int = hidden_size
UpperCAmelCase : List[str] = num_hidden_layers
UpperCAmelCase : Tuple = num_attention_heads
UpperCAmelCase : Union[str, Any] = hidden_act
UpperCAmelCase : Tuple = intermediate_size
UpperCAmelCase : List[Any] = hidden_dropout_prob
UpperCAmelCase : Tuple = attention_probs_dropout_prob
UpperCAmelCase : List[str] = max_position_embeddings
UpperCAmelCase : str = type_vocab_size
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : List[str] = layer_norm_eps
UpperCAmelCase : List[str] = position_embedding_type
UpperCAmelCase : str = use_cache
UpperCAmelCase : Optional[int] = classifier_dropout
# additional properties
UpperCAmelCase : Tuple = max_depth
UpperCAmelCase : Optional[int] = max_xpath_tag_unit_embeddings
UpperCAmelCase : str = max_xpath_subs_unit_embeddings
UpperCAmelCase : Optional[int] = tag_pad_id
UpperCAmelCase : List[Any] = subs_pad_id
UpperCAmelCase : Dict = xpath_unit_hidden_size
| 127 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def lowercase ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
'''simple docstring'''
if not is_accelerate_available():
return method
SCREAMING_SNAKE_CASE_ = version.parse(accelerate.__version__ ).base_version
if version.parse(SCREAMING_SNAKE_CASE ) < version.parse('0.17.0' ):
return method
def wrapper(self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
if hasattr(self , '_hf_hook' ) and hasattr(self._hf_hook , 'pre_forward' ):
self._hf_hook.pre_forward(self )
return method(self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return wrapper
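# Hedged usage sketch (this wrapper matches diffusers' `apply_forward_hook`
# decorator; the model and method below are illustrative only):
# class MyVAE(torch.nn.Module):
#     @apply_forward_hook
#     def encode(self, x):
#         return self.encoder(x)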
| 205 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
__SCREAMING_SNAKE_CASE : Any = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
__SCREAMING_SNAKE_CASE : List[Any] = '''</w>'''
__SCREAMING_SNAKE_CASE : str = '''@@ '''
def snake_case_ ( lowercase__ : Dict ):
'''simple docstring'''
_lowerCAmelCase =set()
_lowerCAmelCase =word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase =char
return pairs
# Speech2Text2 has no max input length
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''facebook/s2t-wav2vec2-large-en-de''': 1_024}
class __lowerCamelCase ( a__ ):
"""simple docstring"""
a_: Tuple = VOCAB_FILES_NAMES
a_: List[Any] = PRETRAINED_VOCAB_FILES_MAP
a_: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_: Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : int="<s>" , lowerCamelCase_ : Optional[Any]="<pad>" , lowerCamelCase_ : Any="</s>" , lowerCamelCase_ : Any="<unk>" , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : Any , ):
super().__init__(
unk_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , do_lower_case=_A , **_A , )
_lowerCAmelCase =do_lower_case
with open(_A , encoding="""utf-8""" ) as vocab_handle:
_lowerCAmelCase =json.load(_A )
_lowerCAmelCase ={v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
_lowerCAmelCase =None
_lowerCAmelCase =None
else:
with open(_A , encoding="""utf-8""" ) as merges_handle:
_lowerCAmelCase =merges_handle.read().split("""\n""" )[:-1]
_lowerCAmelCase =[tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase =dict(zip(_A , range(len(_A ) ) ) )
_lowerCAmelCase ={}
@property
def lowerCAmelCase__ ( self : str ):
return len(self.decoder )
def lowerCAmelCase__ ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase_ : Tuple ):
_lowerCAmelCase =tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase =get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase =min(_A , key=lambda lowerCamelCase_ : self.bpe_ranks.get(_A , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase =bigram
_lowerCAmelCase =[]
_lowerCAmelCase =0
while i < len(_A ):
try:
_lowerCAmelCase =word.index(_A , _A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase =j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase =tuple(_A )
_lowerCAmelCase =new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase =get_pairs(_A )
_lowerCAmelCase =' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase ='\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase =word.replace(_A , """""" )
_lowerCAmelCase =word.replace(""" """ , _A )
_lowerCAmelCase =word
return word
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase_ : Optional[int] ):
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""" )
if self.do_lower_case:
_lowerCAmelCase =text.lower()
_lowerCAmelCase =text.split()
_lowerCAmelCase =[]
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(""" """ ) ) )
return split_tokens
def lowerCAmelCase__ ( self : int , lowerCamelCase_ : Any ):
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase_ : Tuple ):
_lowerCAmelCase =self.decoder.get(_A , self.unk_token )
return result
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase_ : int ):
_lowerCAmelCase =' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase =''.join(string.split(_A ) )
return string
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] = None ):
if not os.path.isdir(_A ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
_lowerCAmelCase =os.path.join(
_A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase =os.path.join(
_A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_A , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + """\n""" )
_lowerCAmelCase =0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A , """w""" , encoding="""utf-8""" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase_ : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
_lowerCAmelCase =token_index
writer.write(""" """.join(_A ) + """\n""" )
index += 1
return (vocab_file, merges_file)
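# Hedged usage sketch (checkpoint id from the vocab map above; decoding is the
# primary use case, since a tokenizer loaded without merges cannot encode):
# from transformers import Speech2Text2Tokenizer
# tokenizer = Speech2Text2Tokenizer.from_pretrained("facebook/s2t-wav2vec2-large-en-de")
# text = tokenizer.decode(token_ids)  # token_ids produced by a Speech2Text2 model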
| 716 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE : str = '''tiny-wmt19-en-ru'''
# Build
# borrowed from a test
__SCREAMING_SNAKE_CASE : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
__SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(vocab, range(len(vocab))))
__SCREAMING_SNAKE_CASE : int = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE : Optional[Any] = Path(tmpdirname)
__SCREAMING_SNAKE_CASE : Any = build_dir / VOCAB_FILES_NAMES['''src_vocab_file''']
__SCREAMING_SNAKE_CASE : Optional[Any] = build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file''']
__SCREAMING_SNAKE_CASE : Tuple = build_dir / VOCAB_FILES_NAMES['''merges_file''']
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
__SCREAMING_SNAKE_CASE : List[Any] = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__SCREAMING_SNAKE_CASE : List[Any] = FSMTConfig(
langs=['''ru''', '''en'''],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__SCREAMING_SNAKE_CASE : Optional[Any] = FSMTForConditionalGeneration(config)
print(F'num of params {tiny_model.num_parameters()}')
# Test
__SCREAMING_SNAKE_CASE : Tuple = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
__SCREAMING_SNAKE_CASE : Optional[Any] = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
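# --- Added illustration (hedged sketch, not in the original script): reload the
# artifacts saved above and run one generate() call as a sanity check.
# `.float()` undoes the fp16 cast so the check also runs on CPU.
reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
reloaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny).float()
reloaded_batch = reloaded_tokenizer(['Making tiny model'], return_tensors='pt')
print('reload check:', reloaded_model.generate(**reloaded_batch, max_length=8).shape)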
| 149 | 0 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def UpperCamelCase_ ( A__ ):
a_ = int(number**0.5 )
return number == sq * sq
def UpperCamelCase_ ( A__ , A__ , A__ , A__ , A__ , A__ ):
a_ = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
a_ = x_den * y_den * z_den
a_ = gcd(A__ , A__ )
top //= hcf
bottom //= hcf
return top, bottom
def UpperCamelCase_ ( A__ = 35 ):
a_ = set()
a_ = 42
a_ = Fraction(0 )
a_ = 42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
a_ = x_num * y_den + x_den * y_num
a_ = x_den * y_den
a_ = gcd(A__ , A__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a_ = add_three(
A__ , A__ , A__ , A__ , A__ , A__ )
unique_s.add(A__ )
# n=2
a_ = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
a_ = x_den * x_den * y_den * y_den
if is_sq(A__ ) and is_sq(A__ ):
a_ = int(sqrt(A__ ) )
a_ = int(sqrt(A__ ) )
a_ = gcd(A__ , A__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a_ = add_three(
A__ , A__ , A__ , A__ , A__ , A__ )
unique_s.add(A__ )
# n=-1
a_ = x_num * y_num
a_ = x_den * y_num + x_num * y_den
a_ = gcd(A__ , A__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a_ = add_three(
A__ , A__ , A__ , A__ , A__ , A__ )
unique_s.add(A__ )
                    # n=-2
a_ = x_num * x_num * y_num * y_num
a_ = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(A__ ) and is_sq(A__ ):
a_ = int(sqrt(A__ ) )
a_ = int(sqrt(A__ ) )
a_ = gcd(A__ , A__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
a_ = add_three(
A__ , A__ , A__ , A__ , A__ , A__ )
unique_s.add(A__ )
for num, den in unique_s:
total += Fraction(A__ , A__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""")
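# --- Added illustration (hedged, self-contained): the n=1 combination rule used
# by add_three above, checked on 1/2 + 1/3 + 1/6 = 1.
from math import gcd as _gcd
_top = 1 * 3 * 6 + 1 * 2 * 6 + 1 * 2 * 3  # x_num*y_den*z_den + y_num*x_den*z_den + z_num*x_den*y_den
_bot = 2 * 3 * 6                           # x_den*y_den*z_den
_h = _gcd(_top, _bot)
assert (_top // _h, _bot // _h) == (1, 1)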
| 263 |
'''simple docstring'''
import math
def UpperCamelCase_ ( A__ ):
return math.sqrt(A__ ) * math.sqrt(A__ ) == num
def UpperCamelCase_ ( A__ ):
a_ = 0
a_ = n
while left <= right:
a_ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
a_ = mid - 1
else:
a_ = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
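# --- Added illustration (hedged): why the exact integer bisection above is
# preferable to a float sqrt round-trip for large inputs.
import math
_n = 100000001 ** 2                      # a genuine perfect square above 2**53
assert math.isqrt(_n) ** 2 == _n         # exact integer arithmetic stays correct
assert not (math.sqrt(_n) * math.sqrt(_n) == _n)  # the float round-trip misses it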
| 263 | 1 |
def A ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ):
_snake_case : str = set()
# Replace all the whitespace in our sentence
_snake_case : Dict = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(UpperCAmelCase ) == 26
def A ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ):
_snake_case : Dict = [False] * 26
for char in input_str:
if char.islower():
_snake_case : List[Any] = True
elif char.isupper():
_snake_case : Optional[Any] = True
return all(UpperCAmelCase )
def A ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ):
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def A ( ):
from timeit import timeit
_snake_case : Dict = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=UpperCAmelCase ) )
print(timeit("is_pangram_faster()" , setup=UpperCAmelCase ) )
print(timeit("is_pangram_fastest()" , setup=UpperCAmelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
    benchmark()
| 278 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__lowerCAmelCase :Tuple = logging.get_logger(__name__)
@dataclass
class _a:
lowerCamelCase__ :str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
lowerCamelCase__ :str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowerCamelCase__ :int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCamelCase__ :bool = field(
default=__A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
_snake_case : Dict = self.task_name.lower()
class _a( __A ):
lowerCamelCase__ :Optional[Any] = 'train'
lowerCamelCase__ :List[str] = 'dev'
lowerCamelCase__ :Any = 'test'
class _a( __A ):
lowerCamelCase__ :GlueDataTrainingArguments
lowerCamelCase__ :str
lowerCamelCase__ :List[InputFeatures]
def __init__( self , __snake_case , __snake_case , __snake_case = None , __snake_case = Split.train , __snake_case = None , ) -> Any:
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , __snake_case , )
_snake_case : Optional[Any] = args
_snake_case : Optional[int] = glue_processors[args.task_name]()
_snake_case : int = glue_output_modes[args.task_name]
if isinstance(__snake_case , __snake_case ):
try:
_snake_case : Tuple = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
_snake_case : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_snake_case : List[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_snake_case , _snake_case : int = label_list[2], label_list[1]
_snake_case : str = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_snake_case : Optional[Any] = cached_features_file + ".lock"
with FileLock(__snake_case ):
if os.path.exists(__snake_case ) and not args.overwrite_cache:
_snake_case : List[str] = time.time()
_snake_case : str = torch.load(__snake_case )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
_snake_case : List[Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_snake_case : List[Any] = self.processor.get_test_examples(args.data_dir )
else:
_snake_case : str = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_snake_case : Tuple = examples[:limit_length]
_snake_case : List[str] = glue_convert_examples_to_features(
__snake_case , __snake_case , max_length=args.max_seq_length , label_list=__snake_case , output_mode=self.output_mode , )
_snake_case : Optional[Any] = time.time()
torch.save(self.features , __snake_case )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self ) -> List[str]:
'''simple docstring'''
return len(self.features )
def __getitem__( self , __snake_case ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
def lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
        return self.label_list
| 278 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = MobileBertTokenizer
UpperCAmelCase : List[Any] = MobileBertTokenizerFast
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Optional[int] = True
UpperCAmelCase : Union[str, Any] = filter_non_english
UpperCAmelCase : str = """google/mobilebert-uncased"""
def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def _lowercase ( self : Dict , lowerCAmelCase_ : Dict ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = '''unwanted, running'''
return input_text, output_text
def _lowercase ( self : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(lowerCAmelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] )
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = tokenizer.encode(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# With lower casing
SCREAMING_SNAKE_CASE_ = self.get_tokenizer(do_lower_case=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = tokenizer.encode(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def _lowercase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def _lowercase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowercase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def _lowercase ( self : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowercase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase ( self : int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def _lowercase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def _lowercase ( self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
SCREAMING_SNAKE_CASE_ = {}
for i, token in enumerate(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ = i
SCREAMING_SNAKE_CASE_ = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def _lowercase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def _lowercase ( self : int ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def _lowercase ( self : int ) -> Optional[int]:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def _lowercase ( self : List[str] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def _lowercase ( self : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''sequence builders''' , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode('''multi-sequence build''' , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _lowercase ( self : Optional[Any] ) -> str:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode_plus(
lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
SCREAMING_SNAKE_CASE_ = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , '''do_lower_case''' ) else False
SCREAMING_SNAKE_CASE_ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def _lowercase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['''的''', '''人''', '''有''']
SCREAMING_SNAKE_CASE_ = ''''''.join(lowerCAmelCase_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
# it is expected that only the first Chinese character is not preceded by "##".
SCREAMING_SNAKE_CASE_ = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
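# --- Added illustration (hedged, self-contained): the greedy longest-match rule
# that the WordpieceTokenizer assertions above rely on. `_wordpiece` is a
# hypothetical re-derivation for illustration, not the library implementation.
def _wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:  # try the longest remaining substring, then shrink
            sub = ("##" if start > 0 else "") + word[start:end]
            if sub in vocab:
                cur = sub
                break
            end -= 1
        if cur is None:  # no piece matched: the whole word becomes [UNK]
            return [unk]
        pieces.append(cur)
        start = end
    return pieces

assert _wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
assert _wordpiece("unwantedX", {"un", "##want", "##ed"}) == ["[UNK]"]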
| 393 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def UpperCAmelCase ( UpperCAmelCase )-> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = int(UpperCAmelCase )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = t // 3600, (t // 60) % 60, t % 60
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
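# --- Added illustration (hedged): the h/m/s arithmetic inside format_time above,
# on concrete values: 3750 s -> (1, 2, 30) -> "1:02:30"; 125 s -> "02:05".
assert (3750 // 3600, (3750 // 60) % 60, 3750 % 60) == (1, 2, 30)
assert ((125 // 60) % 60, 125 % 60) == (2, 5)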
def UpperCAmelCase ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=300 )-> Optional[Any]:
'''simple docstring'''
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def UpperCAmelCase ( UpperCAmelCase )-> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
SCREAMING_SNAKE_CASE_ = f'''{elt:.6f}''' if isinstance(UpperCAmelCase ,UpperCAmelCase ) else str(UpperCAmelCase )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class snake_case :
'''simple docstring'''
UpperCAmelCase : Tuple = 5
UpperCAmelCase : Any = 0.2
def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional["NotebookTrainingTracker"] = None , lowerCAmelCase_ : int = 300 , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = total
SCREAMING_SNAKE_CASE_ = '''''' if prefix is None else prefix
SCREAMING_SNAKE_CASE_ = leave
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = width
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : str = None ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = value
if comment is not None:
SCREAMING_SNAKE_CASE_ = comment
if self.last_value is None:
SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = time.time()
SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = value
SCREAMING_SNAKE_CASE_ = SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = self.warmup
SCREAMING_SNAKE_CASE_ = 1
self.update_bar(lowerCAmelCase_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
SCREAMING_SNAKE_CASE_ = time.time()
SCREAMING_SNAKE_CASE_ = current_time - self.start_time
            # We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
SCREAMING_SNAKE_CASE_ = self.elapsed_time / (value - self.start_value)
else:
SCREAMING_SNAKE_CASE_ = None
if value >= self.total:
SCREAMING_SNAKE_CASE_ = self.total
SCREAMING_SNAKE_CASE_ = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
SCREAMING_SNAKE_CASE_ = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = value
SCREAMING_SNAKE_CASE_ = current_time
if self.average_time_per_item is None:
SCREAMING_SNAKE_CASE_ = 1
else:
SCREAMING_SNAKE_CASE_ = max(int(self.update_every / self.average_time_per_item ) , 1 )
def _lowercase ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int=None ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ''' ''' * (len(str(self.total ) ) - len(str(lowerCAmelCase_ ) )) + str(lowerCAmelCase_ )
if self.elapsed_time is None:
SCREAMING_SNAKE_CASE_ = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
SCREAMING_SNAKE_CASE_ = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
SCREAMING_SNAKE_CASE_ = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def _lowercase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
SCREAMING_SNAKE_CASE_ = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def _lowercase ( self : Optional[int] ) -> int:
"""simple docstring"""
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int]=None ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = None if column_names is None else [column_names]
SCREAMING_SNAKE_CASE_ = None
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
SCREAMING_SNAKE_CASE_ = disp.display(disp.HTML(self.html_code ) , display_id=lowerCAmelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def _lowercase ( self : List[str] , lowerCAmelCase_ : int ) -> List[str]:
"""simple docstring"""
if self.inner_table is None:
SCREAMING_SNAKE_CASE_ = [list(values.keys() ), list(values.values() )]
else:
SCREAMING_SNAKE_CASE_ = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = columns
self.inner_table.append([values[c] for c in columns] )
def _lowercase ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : List[str]=300 ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = NotebookProgressBar(lowerCAmelCase_ , prefix=lowerCAmelCase_ , parent=self , width=lowerCAmelCase_ )
return self.child_bar
def _lowercase ( self : Optional[int] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = None
self.display()
class snake_case ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = False
def _lowercase ( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
SCREAMING_SNAKE_CASE_ = NotebookTrainingTracker(state.max_steps , lowerCAmelCase_ )
def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , **lowerCAmelCase_ : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
SCREAMING_SNAKE_CASE_ = False
def _lowercase ( self : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
if not has_length(lowerCAmelCase_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
SCREAMING_SNAKE_CASE_ = self.training_tracker.add_child(len(lowerCAmelCase_ ) )
else:
SCREAMING_SNAKE_CASE_ = NotebookProgressBar(len(lowerCAmelCase_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def _lowercase ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , **lowerCAmelCase_ : int ) -> Any:
"""simple docstring"""
if self.prediction_bar is not None:
self.prediction_bar.close()
SCREAMING_SNAKE_CASE_ = None
def _lowercase ( self : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str=None , **lowerCAmelCase_ : Tuple ) -> List[str]:
"""simple docstring"""
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
SCREAMING_SNAKE_CASE_ = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
SCREAMING_SNAKE_CASE_ = state.global_step
self.training_tracker.write_line(lowerCAmelCase_ )
def _lowercase ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : str ) -> str:
"""simple docstring"""
if self.training_tracker is not None:
SCREAMING_SNAKE_CASE_ = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
SCREAMING_SNAKE_CASE_ = log['''loss''']
break
if self.first_column == "Epoch":
SCREAMING_SNAKE_CASE_ = int(state.epoch )
else:
SCREAMING_SNAKE_CASE_ = state.global_step
SCREAMING_SNAKE_CASE_ = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
SCREAMING_SNAKE_CASE_ = re.sub(r'''\_loss$''' , '''''' , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = metrics.pop('''total_flos''' , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = metrics.pop('''epoch''' , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = metrics.pop(F'''{metric_key_prefix}_runtime''' , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , lowerCAmelCase_ )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
SCREAMING_SNAKE_CASE_ = v
else:
SCREAMING_SNAKE_CASE_ = k.split('''_''' )
SCREAMING_SNAKE_CASE_ = ''' '''.join([part.capitalize() for part in splits[1:]] )
SCREAMING_SNAKE_CASE_ = v
self.training_tracker.write_line(lowerCAmelCase_ )
self.training_tracker.remove_child()
SCREAMING_SNAKE_CASE_ = None
# Evaluation takes a long time so we should force the next update.
SCREAMING_SNAKE_CASE_ = True
def _lowercase ( self : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = None
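# --- Added illustration (hedged): the ETA arithmetic that update() above keeps
# per item, on concrete numbers.
elapsed, start_value, value, total = 50.0, 0, 100, 400
avg_per_item = elapsed / (value - start_value)        # 0.5 s per item
predicted_remaining = avg_per_item * (total - value)  # 150.0 s left
assert (avg_per_item, predicted_remaining) == (0.5, 150.0)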
| 393 | 1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowerCamelCase__ ( A ):
'''simple docstring'''
A_ = """SpeechT5FeatureExtractor"""
A_ = """SpeechT5Tokenizer"""
def __init__( self : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ) -> List[Any]:
'''simple docstring'''
super().__init__(UpperCamelCase_ , UpperCamelCase_ )
def __call__( self : Union[str, Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Any ) -> str:
'''simple docstring'''
_lowercase : str = kwargs.pop('audio' , UpperCamelCase_ )
_lowercase : Any = kwargs.pop('text' , UpperCamelCase_ )
_lowercase : List[str] = kwargs.pop('text_target' , UpperCamelCase_ )
_lowercase : int = kwargs.pop('audio_target' , UpperCamelCase_ )
_lowercase : Any = kwargs.pop('sampling_rate' , UpperCamelCase_ )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
_lowercase : Union[str, Any] = self.feature_extractor(UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ )
elif text is not None:
_lowercase : int = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
else:
_lowercase : Optional[int] = None
if audio_target is not None:
_lowercase : Tuple = self.feature_extractor(audio_target=UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : str = targets['input_values']
elif text_target is not None:
_lowercase : List[str] = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Tuple = targets['input_ids']
else:
_lowercase : Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
_lowercase : Optional[Any] = labels
_lowercase : List[Any] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
_lowercase : Union[str, Any] = decoder_attention_mask
return inputs
def __UpperCAmelCase ( self : List[str] , *UpperCamelCase_ : str , **UpperCamelCase_ : Any ) -> List[Any]:
'''simple docstring'''
_lowercase : Optional[Any] = kwargs.pop('input_values' , UpperCamelCase_ )
_lowercase : int = kwargs.pop('input_ids' , UpperCamelCase_ )
_lowercase : Dict = kwargs.pop('labels' , UpperCamelCase_ )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
_lowercase : int = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
elif input_ids is not None:
_lowercase : int = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_ )
else:
_lowercase : Union[str, Any] = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCamelCase_ , UpperCamelCase_ ) and "input_ids" in labels[0]):
_lowercase : Any = self.tokenizer.pad(UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Dict = targets['input_ids']
else:
_lowercase : Dict = self.feature_extractor.feature_size
_lowercase : Union[str, Any] = self.feature_extractor.num_mel_bins
_lowercase : int = self.feature_extractor.pad(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ )
_lowercase : Any = feature_size_hack
_lowercase : List[str] = targets['input_values']
else:
_lowercase : Dict = None
if inputs is None:
return targets
if targets is not None:
_lowercase : Optional[Any] = labels
_lowercase : Union[str, Any] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
_lowercase : Tuple = decoder_attention_mask
return inputs
def __UpperCAmelCase ( self : List[str] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ )
def __UpperCAmelCase ( self : int , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : int ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ )
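# --- Added illustration (hedged, self-contained): the labels packaging contract
# shared by __call__ and pad() above. The targets' main tensor is exposed as
# inputs["labels"] and the targets' mask as inputs["decoder_attention_mask"].
inputs = {"input_values": [0.0, 0.1]}
targets = {"input_ids": [[5, 6]], "attention_mask": [[1, 1]]}
inputs["labels"] = targets["input_ids"]
if targets.get("attention_mask") is not None:
    inputs["decoder_attention_mask"] = targets["attention_mask"]
assert set(inputs) == {"input_values", "labels", "decoder_attention_mask"}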
| 4 |
'''simple docstring'''
def __UpperCamelCase ( _lowercase ) -> bool:
return str(_lowercase ) == str(_lowercase )[::-1]
def __UpperCamelCase ( _lowercase ) -> int:
return int(_lowercase ) + int(str(_lowercase )[::-1] )
def __UpperCamelCase ( _lowercase = 1_0000 ) -> int:
_lowercase : List[str] = []
for num in range(1, _lowercase ):
_lowercase : Tuple = 0
_lowercase : Tuple = num
while iterations < 50:
_lowercase : Union[str, Any] = sum_reverse(_lowercase )
iterations += 1
if is_palindrome(_lowercase ):
break
else:
lychrel_nums.append(_lowercase )
return len(_lowercase )
if __name__ == "__main__":
print(F'''{solution() = }''')
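# --- Added illustration (hedged): one reverse-and-add iteration on a concrete
# number: 56 -> 56 + 65 = 121, a palindrome, so 56 drops out after one step.
assert 56 + int(str(56)[::-1]) == 121
assert str(121) == str(121)[::-1]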
| 4 | 1 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
lowerCAmelCase__ :str = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def lowerCAmelCase__ ( a__: Dict , a__: tuple , a__: Path , a__: Optional[int] , a__: int , a__: Optional[int] , a__: Optional[int] , a__: Tuple=False , ) -> Optional[Any]:
'''simple docstring'''
output_path.parent.mkdir(parents=a__ , exist_ok=a__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
a__ , a__ , f=output_path.as_posix() , input_names=a__ , output_names=a__ , dynamic_axes=a__ , do_constant_folding=a__ , use_external_data_format=a__ , enable_onnx_checker=a__ , opset_version=a__ , )
else:
export(
a__ , a__ , f=output_path.as_posix() , input_names=a__ , output_names=a__ , dynamic_axes=a__ , do_constant_folding=a__ , opset_version=a__ , )
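# --- Added illustration (hedged, not in the original script): a minimal,
# self-contained torch.onnx.export call with dynamic axes, mirroring the pattern
# the wrapper above forwards for torch >= 1.11. The helper name and output path
# are hypothetical; call it manually if desired.
def _demo_dynamic_batch_export(path="/tmp/_tiny_linear.onnx"):
    tiny = torch.nn.Linear(4, 2)
    torch.onnx.export(
        tiny,
        torch.randn(1, 4),                 # example input fixes the rank, not the batch size
        path,
        input_names=["x"],
        output_names=["y"],
        dynamic_axes={"x": {0: "batch"}, "y": {0: "batch"}},  # batch dim stays symbolic
        opset_version=14,
    )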
@torch.no_grad()
def lowerCAmelCase__ ( a__: str , a__: str , a__: int , a__: bool = False ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
_UpperCAmelCase = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = StableDiffusionPipeline.from_pretrained(a__ , torch_dtype=a__ ).to(a__ )
_UpperCAmelCase = Path(a__ )
# TEXT ENCODER
_UpperCAmelCase = pipeline.text_encoder.config.max_position_embeddings
_UpperCAmelCase = pipeline.text_encoder.config.hidden_size
_UpperCAmelCase = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=a__ , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a__ , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=a__ , )
del pipeline.text_encoder
# UNET
_UpperCAmelCase = pipeline.unet.config.in_channels
_UpperCAmelCase = pipeline.unet.config.sample_size
_UpperCAmelCase = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , a__ , a__ , a__ ).to(device=a__ , dtype=a__ ),
torch.randn(2 ).to(device=a__ , dtype=a__ ),
torch.randn(2 , a__ , a__ ).to(device=a__ , dtype=a__ ),
False,
) , output_path=a__ , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=a__ , use_external_data_format=a__ , )
_UpperCAmelCase = str(unet_path.absolute().as_posix() )
_UpperCAmelCase = os.path.dirname(a__ )
_UpperCAmelCase = onnx.load(a__ )
# clean up existing tensor files
shutil.rmtree(a__ )
os.mkdir(a__ )
# collate external tensor files into one
onnx.save_model(
a__ , a__ , save_as_external_data=a__ , all_tensors_to_one_file=a__ , location='weights.pb' , convert_attribute=a__ , )
del pipeline.unet
# VAE ENCODER
_UpperCAmelCase = pipeline.vae
_UpperCAmelCase = vae_encoder.config.in_channels
_UpperCAmelCase = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
_UpperCAmelCase = lambda a__ , a__ : vae_encoder.encode(a__ , a__ )[0].sample()
onnx_export(
a__ , model_args=(
torch.randn(1 , a__ , a__ , a__ ).to(device=a__ , dtype=a__ ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=a__ , )
# VAE DECODER
_UpperCAmelCase = pipeline.vae
_UpperCAmelCase = vae_decoder.config.latent_channels
_UpperCAmelCase = vae_decoder.config.out_channels
# forward only through the decoder part
_UpperCAmelCase = vae_encoder.decode
onnx_export(
a__ , model_args=(
torch.randn(1 , a__ , a__ , a__ ).to(device=a__ , dtype=a__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=a__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
_UpperCAmelCase = pipeline.safety_checker
_UpperCAmelCase = safety_checker.config.vision_config.num_channels
_UpperCAmelCase = safety_checker.config.vision_config.image_size
_UpperCAmelCase = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , a__ , a__ , a__ , ).to(device=a__ , dtype=a__ ),
torch.randn(1 , a__ , a__ , a__ ).to(device=a__ , dtype=a__ ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=a__ , )
del pipeline.safety_checker
_UpperCAmelCase = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
_UpperCAmelCase = pipeline.feature_extractor
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=a__ , feature_extractor=a__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(a__ )
print('ONNX pipeline saved to' , a__ )
del pipeline
del onnx_pipeline
_UpperCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(a__ , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
lowerCAmelCase__ :Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
lowerCAmelCase__ :Union[str, Any] = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
| 618 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 618 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCamelCase (__snake_case ):
_SCREAMING_SNAKE_CASE : Any = """detr"""
_SCREAMING_SNAKE_CASE : Optional[int] = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE : Optional[int] = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self :Optional[int] , __magic_name__ :Optional[int]=True , __magic_name__ :int=None , __magic_name__ :List[str]=3 , __magic_name__ :List[Any]=100 , __magic_name__ :Tuple=6 , __magic_name__ :Optional[Any]=2_048 , __magic_name__ :Union[str, Any]=8 , __magic_name__ :List[Any]=6 , __magic_name__ :str=2_048 , __magic_name__ :Dict=8 , __magic_name__ :Optional[int]=0.0 , __magic_name__ :Optional[Any]=0.0 , __magic_name__ :List[str]=True , __magic_name__ :Optional[int]="relu" , __magic_name__ :Optional[int]=256 , __magic_name__ :Dict=0.1 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :Union[str, Any]=0.0 , __magic_name__ :Optional[Any]=0.02 , __magic_name__ :Tuple=1.0 , __magic_name__ :Optional[int]=False , __magic_name__ :Tuple="sine" , __magic_name__ :Optional[Any]="resnet50" , __magic_name__ :str=True , __magic_name__ :int=False , __magic_name__ :Any=1 , __magic_name__ :Tuple=5 , __magic_name__ :List[str]=2 , __magic_name__ :int=1 , __magic_name__ :Tuple=1 , __magic_name__ :Any=5 , __magic_name__ :Dict=2 , __magic_name__ :Any=0.1 , **__magic_name__ :List[Any] , ) ->Tuple:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowercase : Dict = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__magic_name__ , __magic_name__ ):
lowercase : Any = backbone_config.get("""model_type""" )
lowercase : Tuple = CONFIG_MAPPING[backbone_model_type]
lowercase : List[Any] = config_class.from_dict(__magic_name__ )
# set timm attributes to None
lowercase : Any = None, None, None
lowercase : Union[str, Any] = use_timm_backbone
lowercase : Dict = backbone_config
lowercase : Any = num_channels
lowercase : Any = num_queries
lowercase : List[str] = d_model
lowercase : Any = encoder_ffn_dim
lowercase : Optional[int] = encoder_layers
lowercase : Optional[int] = encoder_attention_heads
lowercase : Dict = decoder_ffn_dim
lowercase : int = decoder_layers
lowercase : Optional[Any] = decoder_attention_heads
lowercase : Tuple = dropout
lowercase : Dict = attention_dropout
lowercase : Union[str, Any] = activation_dropout
lowercase : Optional[int] = activation_function
lowercase : Any = init_std
lowercase : Tuple = init_xavier_std
lowercase : Union[str, Any] = encoder_layerdrop
lowercase : str = decoder_layerdrop
lowercase : Dict = encoder_layers
lowercase : List[Any] = auxiliary_loss
lowercase : Tuple = position_embedding_type
lowercase : Optional[Any] = backbone
lowercase : Any = use_pretrained_backbone
lowercase : str = dilation
# Hungarian matcher
lowercase : Tuple = class_cost
lowercase : Any = bbox_cost
lowercase : Dict = giou_cost
# Loss coefficients
lowercase : str = mask_loss_coefficient
lowercase : Union[str, Any] = dice_loss_coefficient
lowercase : Optional[int] = bbox_loss_coefficient
lowercase : List[Any] = giou_loss_coefficient
lowercase : Union[str, Any] = eos_coefficient
super().__init__(is_encoder_decoder=__magic_name__ , **__magic_name__ )
@property
def __snake_case ( self :Optional[int] ) ->int:
return self.encoder_attention_heads
@property
def __snake_case ( self :str ) ->int:
return self.d_model
@classmethod
def __snake_case ( cls :Any , __magic_name__ :PretrainedConfig , **__magic_name__ :List[Any] ) ->Tuple:
return cls(backbone_config=__magic_name__ , **__magic_name__ )
def __snake_case ( self :List[str] ) ->Dict[str, any]:
lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowercase : List[Any] = self.backbone_config.to_dict()
lowercase : Tuple = self.__class__.model_type
return output
class UpperCamelCase (__snake_case ):
_SCREAMING_SNAKE_CASE : int = version.parse("""1.11""" )
@property
def __snake_case ( self :Dict ) ->Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __snake_case ( self :List[Any] ) ->float:
return 1E-5
@property
def __snake_case ( self :int ) ->int:
return 12
| 704 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def UpperCamelCase ( _A , _A , _A=0 ) -> Any:
# Format the message.
if name is None:
lowercase : Tuple = None
else:
lowercase : Any = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}"""
lowercase : List[str] = fmt.format(_A )
# Print and recurse (if needed).
if isinstance(_A , _A ):
if msg is not None:
print(_A )
for k in val.keys():
recursive_print(_A , val[k] , spaces + 2 )
elif isinstance(_A , torch.Tensor ):
print(_A , """:""" , val.size() )
else:
print(_A , """:""" , _A )
def UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
lowercase : Any = param.size()
if checkpoint_version == 1.0:
# version 1.0 stores [num_heads * hidden_size * num_splits, :]
lowercase : str = (num_heads, hidden_size, num_splits) + input_shape[1:]
lowercase : Dict = param.view(*_A )
lowercase : str = param.transpose(0 , 2 )
lowercase : Optional[int] = param.transpose(1 , 2 ).contiguous()
elif checkpoint_version >= 2.0:
# other versions store [num_heads * num_splits * hidden_size, :]
lowercase : Union[str, Any] = (num_heads, num_splits, hidden_size) + input_shape[1:]
lowercase : Any = param.view(*_A )
lowercase : Optional[int] = param.transpose(0 , 1 ).contiguous()
lowercase : Any = param.view(*_A )
return param
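# --- Added illustration (hedged): the checkpoint_version >= 2.0 permutation above
# on concrete shapes. A fused QKV tensor stored as
# [num_heads * num_splits * hidden_per_head, :] is regrouped to
# [num_splits * num_heads * hidden_per_head, :] without changing its size.
_heads, _splits, _hidden, _cols = 2, 3, 4, 5
_p = torch.arange(_heads * _splits * _hidden * _cols).view(-1, _cols)  # [24, 5]
_q = _p.view(_heads, _splits, _hidden, _cols).transpose(0, 1).contiguous().view(-1, _cols)
assert _q.shape == _p.shape and not torch.equal(_p, _q)  # same size, rows regrouped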
def UpperCamelCase ( _A , _A , _A ) -> List[str]:
# The converted output model.
lowercase : str = {}
# old versions did not store training args
lowercase : Optional[int] = input_state_dict.get("""args""" , _A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
lowercase : List[Any] = ds_args.padded_vocab_size
lowercase : int = ds_args.max_position_embeddings
lowercase : Optional[Any] = ds_args.hidden_size
lowercase : int = ds_args.num_layers
lowercase : Union[str, Any] = ds_args.num_attention_heads
lowercase : List[str] = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
lowercase : int = config.n_head
# The hidden_size per head.
lowercase : Union[str, Any] = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
lowercase : List[str] = input_state_dict["""checkpoint_version"""]
else:
lowercase : List[str] = 0.0
# The model.
lowercase : Tuple = input_state_dict["""model"""]
# The language model.
lowercase : Optional[int] = model["""language_model"""]
# The embeddings.
lowercase : Optional[int] = lm["""embedding"""]
# The word embeddings.
lowercase : Union[str, Any] = embeddings["""word_embeddings"""]["""weight"""]
# Truncate the embedding table to vocab_size rows.
lowercase : Tuple = word_embeddings[: config.vocab_size, :]
lowercase : Tuple = word_embeddings
# The position embeddings.
lowercase : Tuple = embeddings["""position_embeddings"""]["""weight"""]
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
lowercase : Any = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" )
# Store the position embeddings.
lowercase : Optional[int] = pos_embeddings
# The transformer.
lowercase : str = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
# The regex to extract layer names.
lowercase : str = re.compile(r"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" )
# The simple map of names for "automated" rules.
lowercase : Optional[Any] = {
"""attention.dense""": """.attn.c_proj.""",
"""self_attention.dense""": """.attn.c_proj.""",
"""mlp.dense_h_to_4h""": """.mlp.c_fc.""",
"""mlp.dense_4h_to_h""": """.mlp.c_proj.""",
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
lowercase : int = layer_re.match(_A )
# Stop if that's not a layer
if m is None:
break
# The index of the layer.
lowercase : Optional[int] = int(m.group(1 ) )
# The name of the operation.
lowercase : Union[str, Any] = m.group(2 )
# Is it a weight or a bias?
lowercase : Dict = m.group(3 )
# The name of the layer.
lowercase : List[Any] = F"""transformer.h.{layer_idx}"""
# For layernorm(s), simply store the layer norm.
if op_name.endswith("""layernorm""" ):
lowercase : List[str] = """ln_1""" if op_name.startswith("""input""" ) else """ln_2"""
lowercase : Dict = val
# Transpose the QKV matrix.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "weight":
# Insert a tensor of 1x1xDxD bias.
lowercase : Optional[int] = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view(
1 , 1 , _A , _A )
lowercase : List[str] = causal_mask
# Insert a "dummy" tensor for masked_bias.
lowercase : str = torch.tensor(-1e4 , dtype=torch.floataa )
lowercase : Tuple = masked_bias
lowercase : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
lowercase : int = out_val.transpose(0 , 1 ).contiguous()
# Store.
lowercase : List[Any] = out_val
# Transpose the bias.
elif (
op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
) and weight_or_bias == "bias":
lowercase : str = fix_query_key_value_ordering(_A , _A , 3 , _A , _A )
# Store. No change of shape.
lowercase : List[Any] = out_val
# Transpose the weights.
elif weight_or_bias == "weight":
lowercase : Optional[int] = megatron_to_transformers[op_name]
lowercase : int = val.transpose(0 , 1 )
# Copy the bias.
elif weight_or_bias == "bias":
lowercase : Union[str, Any] = megatron_to_transformers[op_name]
lowercase : str = val
# DEBUG.
assert config.n_layer == layer_idx + 1
# The final layernorm.
lowercase : Dict = transformer["""final_layernorm.weight"""]
lowercase : Any = transformer["""final_layernorm.bias"""]
    # For the LM head, transformers expects the output matrix to be tied to the word embeddings.
lowercase : int = word_embeddings
# It should be done!
return output_state_dict
def UpperCamelCase ( ) -> int:
# Create the argument parser.
lowercase : Dict = argparse.ArgumentParser()
parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" )
parser.add_argument(
"""path_to_checkpoint""" , type=_A , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , )
parser.add_argument(
"""--config_file""" , default="""""" , type=_A , help="""An optional config json file describing the pre-trained model.""" , )
lowercase : Dict = parser.parse_args()
# Extract the basename.
lowercase : Union[str, Any] = os.path.dirname(args.path_to_checkpoint )
# Load the model.
# the .zip is very optional, let's keep it for backward compatibility
print(F"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" )
if args.path_to_checkpoint.endswith(""".zip""" ):
with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint:
with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict:
lowercase : Any = torch.load(_A , map_location="""cpu""" )
else:
lowercase : Tuple = torch.load(args.path_to_checkpoint , map_location="""cpu""" )
lowercase : Dict = input_state_dict.get("""args""" , _A )
# Read the config, or default to the model released by NVIDIA.
if args.config_file == "":
if ds_args is not None:
if ds_args.bias_gelu_fusion:
lowercase : Optional[int] = """gelu_fast"""
elif ds_args.openai_gelu:
lowercase : int = """gelu_new"""
else:
lowercase : Tuple = """gelu"""
else:
# in the very early days this used to be "gelu_new"
lowercase : List[str] = """gelu_new"""
# Spell out all parameters in case the defaults change.
lowercase : Optional[Any] = GPTaConfig(
vocab_size=50_257 , n_positions=1_024 , n_embd=1_024 , n_layer=24 , n_head=16 , n_inner=4_096 , activation_function=_A , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type="""cls_index""" , summary_use_proj=_A , summary_activation=_A , summary_proj_to_labels=_A , summary_first_dropout=0.1 , scale_attn_weights=_A , use_cache=_A , bos_token_id=50_256 , eos_token_id=50_256 , )
else:
lowercase : int = GPTaConfig.from_json_file(args.config_file )
lowercase : Optional[Any] = ["""GPT2LMHeadModel"""]
# Convert.
print("""Converting""" )
lowercase : List[str] = convert_megatron_checkpoint(_A , _A , _A )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
recursive_print(_A , _A )
# Add tokenizer class info to config
# see https://github.com/huggingface/transformers/issues/13906)
if ds_args is not None:
lowercase : Optional[Any] = ds_args.tokenizer_type
if tokenizer_type == "GPT2BPETokenizer":
lowercase : Tuple = """gpt2"""
elif tokenizer_type == "PretrainedFromHF":
lowercase : Optional[int] = ds_args.tokenizer_name_or_path
else:
raise ValueError(F"""Unrecognized tokenizer_type {tokenizer_type}""" )
else:
lowercase : Optional[Any] = """gpt2"""
lowercase : int = AutoTokenizer.from_pretrained(_A )
lowercase : Union[str, Any] = type(_A ).__name__
lowercase : Any = tokenizer_class
# Store the config to file.
print("""Saving config""" )
config.save_pretrained(_A )
# Save tokenizer based on args
print(F"""Adding {tokenizer_class} tokenizer files""" )
tokenizer.save_pretrained(_A )
# Store the state_dict to file.
lowercase : Any = os.path.join(_A , """pytorch_model.bin""" )
print(F"""Saving checkpoint to \"{output_checkpoint_file}\"""" )
torch.save(_A , _A )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
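# Hypothetical invocation of this script (the script name and checkpoint path
# are placeholders, not taken from the original file):
#   python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       ./checkpoint/mp_rank_00/model_optim_rng.pt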
| 348 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : List[str] = AudioLDMPipeline
__A : List[str] = TEXT_TO_AUDIO_PARAMS
__A : Tuple = TEXT_TO_AUDIO_BATCH_PARAMS
__A : List[str] = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __lowercase ( self) -> int:
'''simple docstring'''
torch.manual_seed(0)
a__ : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=(32, 64) , class_embed_type='simple_projection' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=lowercase , )
a__ : Any = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , )
torch.manual_seed(0)
a__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0)
a__ : Tuple = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
a__ : str = ClapTextModelWithProjection(lowercase)
a__ : Dict = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' , model_max_length=77)
a__ : Union[str, Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowercase , )
a__ : Dict = SpeechTaHifiGan(lowercase)
a__ : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'vocoder': vocoder,
}
return components
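    # Note (illustrative, not from the original file): the components above are
    # deliberately tiny (a 32/64-channel UNet, a 5-layer text encoder, an
    # 8-dim vocoder) so the fast tests run on CPU in seconds; only the slow
    # tests further below load a real pretrained checkpoint.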
def __lowercase ( self , lowercase , lowercase=0) -> Optional[int]:
'''simple docstring'''
if str(lowercase).startswith('mps'):
a__ : int = torch.manual_seed(lowercase)
else:
a__ : List[str] = torch.Generator(device=lowercase).manual_seed(lowercase)
a__ : Optional[int] = {
'prompt': 'A hammer hitting a wooden surface',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
}
return inputs
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ : List[Any] = self.get_dummy_components()
a__ : int = AudioLDMPipeline(**lowercase)
a__ : Optional[int] = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : Optional[Any] = self.get_dummy_inputs(lowercase)
a__ : Optional[int] = audioldm_pipe(**lowercase)
a__ : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase) == 256
a__ : List[Any] = audio[:10]
a__ : Tuple = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : str = self.get_dummy_components()
a__ : Tuple = AudioLDMPipeline(**lowercase)
a__ : Any = audioldm_pipe.to(lowercase)
a__ : Tuple = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : List[Any] = self.get_dummy_inputs(lowercase)
a__ : Dict = 3 * [inputs['prompt']]
# forward
a__ : Union[str, Any] = audioldm_pipe(**lowercase)
a__ : List[str] = output.audios[0]
a__ : List[str] = self.get_dummy_inputs(lowercase)
a__ : Tuple = 3 * [inputs.pop('prompt')]
a__ : Optional[Any] = audioldm_pipe.tokenizer(
lowercase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
a__ : str = text_inputs['input_ids'].to(lowercase)
a__ : Union[str, Any] = audioldm_pipe.text_encoder(
lowercase , )
a__ : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
a__ : Any = F.normalize(lowercase , dim=-1)
a__ : Any = prompt_embeds
# forward
a__ : int = audioldm_pipe(**lowercase)
a__ : Optional[Any] = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Any = self.get_dummy_components()
a__ : Tuple = AudioLDMPipeline(**lowercase)
a__ : str = audioldm_pipe.to(lowercase)
a__ : Union[str, Any] = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : str = self.get_dummy_inputs(lowercase)
a__ : List[str] = 3 * ['this is a negative prompt']
a__ : Optional[int] = negative_prompt
a__ : Union[str, Any] = 3 * [inputs['prompt']]
# forward
a__ : Union[str, Any] = audioldm_pipe(**lowercase)
a__ : str = output.audios[0]
a__ : List[str] = self.get_dummy_inputs(lowercase)
a__ : int = 3 * [inputs.pop('prompt')]
a__ : Tuple = []
for p in [prompt, negative_prompt]:
a__ : Optional[int] = audioldm_pipe.tokenizer(
lowercase , padding='max_length' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowercase , return_tensors='pt' , )
a__ : Optional[int] = text_inputs['input_ids'].to(lowercase)
a__ : List[str] = audioldm_pipe.text_encoder(
lowercase , )
a__ : int = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
a__ : Optional[Any] = F.normalize(lowercase , dim=-1)
embeds.append(lowercase)
a__ , a__ : Union[str, Any] = embeds
# forward
a__ : str = audioldm_pipe(**lowercase)
a__ : int = output.audios[0]
assert np.abs(audio_a - audio_a).max() < 1e-2
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ : Optional[Any] = self.get_dummy_components()
a__ : List[Any] = PNDMScheduler(skip_prk_steps=lowercase)
a__ : Any = AudioLDMPipeline(**lowercase)
a__ : Any = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : List[Any] = self.get_dummy_inputs(lowercase)
a__ : Tuple = 'egg cracking'
a__ : Optional[int] = audioldm_pipe(**lowercase , negative_prompt=lowercase)
a__ : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase) == 256
a__ : Optional[Any] = audio[:10]
a__ : List[str] = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32])
assert np.abs(audio_slice - expected_slice).max() < 1e-2
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ : List[str] = self.get_dummy_components()
a__ : Optional[int] = PNDMScheduler(skip_prk_steps=lowercase)
a__ : Optional[Any] = AudioLDMPipeline(**lowercase)
a__ : Dict = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : int = 'A hammer hitting a wooden surface'
# test num_waveforms_per_prompt=1 (default)
a__ : Dict = audioldm_pipe(lowercase , num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
a__ : Union[str, Any] = 2
a__ : Optional[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
a__ : Tuple = 2
a__ : int = audioldm_pipe(lowercase , num_inference_steps=2 , num_waveforms_per_prompt=lowercase).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
a__ : Dict = 2
a__ : int = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowercase).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __lowercase ( self) -> List[str]:
'''simple docstring'''
a__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
a__ : List[str] = self.get_dummy_components()
a__ : List[Any] = AudioLDMPipeline(**lowercase)
a__ : str = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : List[Any] = audioldm_pipe.vocoder.config.sampling_rate
a__ : Union[str, Any] = self.get_dummy_inputs(lowercase)
a__ : Optional[int] = audioldm_pipe(audio_length_in_s=0.0_16 , **lowercase)
a__ : int = output.audios[0]
assert audio.ndim == 1
assert len(lowercase) / vocoder_sampling_rate == 0.0_16
a__ : Optional[int] = audioldm_pipe(audio_length_in_s=0.0_32 , **lowercase)
a__ : List[Any] = output.audios[0]
assert audio.ndim == 1
assert len(lowercase) / vocoder_sampling_rate == 0.0_32
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : int = self.get_dummy_components()
a__ : Optional[Any] = AudioLDMPipeline(**lowercase)
a__ : Dict = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : Tuple = ['hey']
a__ : Dict = audioldm_pipe(lowercase , num_inference_steps=1)
a__ : Union[str, Any] = output.audios.shape
assert audio_shape == (1, 256)
a__ : Union[str, Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
a__ : str = SpeechTaHifiGan(lowercase).to(lowercase)
a__ : Union[str, Any] = audioldm_pipe(lowercase , num_inference_steps=1)
a__ : List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def __lowercase ( self) -> Any:
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowercase)
def __lowercase ( self) -> int:
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowercase)
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowercase ( self) -> str:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase)
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self , lowercase , lowercase="cpu" , lowercase=torch.floataa , lowercase=0) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] = torch.Generator(device=lowercase).manual_seed(lowercase)
a__ : str = np.random.RandomState(lowercase).standard_normal((1, 8, 128, 16))
a__ : Union[str, Any] = torch.from_numpy(lowercase).to(device=lowercase , dtype=lowercase)
a__ : Optional[Any] = {
'prompt': 'A hammer hitting a wooden surface',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 2.5,
}
return inputs
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : Optional[int] = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
a__ : Tuple = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : Optional[Any] = self.get_inputs(lowercase)
a__ : Any = 25
a__ : str = audioldm_pipe(**lowercase).audios[0]
assert audio.ndim == 1
assert len(lowercase) == 8_1920
a__ : List[str] = audio[7_7230:7_7240]
a__ : Union[str, Any] = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15])
a__ : Union[str, Any] = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-2
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : List[str] = AudioLDMPipeline.from_pretrained('cvssp/audioldm')
a__ : Dict = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
a__ : Union[str, Any] = audioldm_pipe.to(lowercase)
audioldm_pipe.set_progress_bar_config(disable=lowercase)
a__ : Optional[Any] = self.get_inputs(lowercase)
a__ : Any = audioldm_pipe(**lowercase).audios[0]
assert audio.ndim == 1
assert len(lowercase) == 8_1920
a__ : Optional[Any] = audio[2_7780:2_7790]
a__ : Any = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12])
a__ : Optional[Any] = np.abs(expected_slice - audio_slice).max()
assert max_diff < 3e-2
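# A hypothetical end-to-end sketch mirroring the slow tests above (the model
# id "cvssp/audioldm" comes from the tests; the prompt and step count are
# illustrative, not prescribed by this file):
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
#   audio = pipe(
#       "A hammer hitting a wooden surface", num_inference_steps=25
#   ).audios[0]
#   # `audio` is a 1-D numpy waveform at the vocoder's sampling rate.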
| 302 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase : int = logging.get_logger(__name__)
lowercase : Dict = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Dict = '''yolos'''
def __init__( self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=[512, 864] , lowercase=16 , lowercase=3 , lowercase=True , lowercase=100 , lowercase=True , lowercase=False , lowercase=1 , lowercase=5 , lowercase=2 , lowercase=5 , lowercase=2 , lowercase=0.1 , **lowercase , ) -> List[str]:
'''simple docstring'''
super().__init__(**lowercase)
a__ : int = hidden_size
a__ : str = num_hidden_layers
a__ : List[str] = num_attention_heads
a__ : List[str] = intermediate_size
a__ : Any = hidden_act
a__ : str = hidden_dropout_prob
a__ : Union[str, Any] = attention_probs_dropout_prob
a__ : Any = initializer_range
a__ : List[Any] = layer_norm_eps
a__ : Union[str, Any] = image_size
a__ : Optional[int] = patch_size
a__ : Tuple = num_channels
a__ : Optional[Any] = qkv_bias
a__ : Union[str, Any] = num_detection_tokens
a__ : Union[str, Any] = use_mid_position_embeddings
a__ : List[Any] = auxiliary_loss
# Hungarian matcher
a__ : Union[str, Any] = class_cost
a__ : Union[str, Any] = bbox_cost
a__ : Dict = giou_cost
# Loss coefficients
a__ : Optional[Any] = bbox_loss_coefficient
a__ : List[Any] = giou_loss_coefficient
a__ : str = eos_coefficient
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : int = version.parse('''1.11''' )
@property
def __lowercase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __lowercase ( self) -> float:
'''simple docstring'''
return 1e-4
@property
def __lowercase ( self) -> int:
'''simple docstring'''
return 12
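# Note (illustrative, not from the original file): the three properties above
# supply the ONNX export settings. The dynamic-axes mapping marks all four
# pixel_values dimensions as variable, so an exported graph accepts e.g.
# (2, 3, 512, 864) as well as other batch and image sizes; 1e-4 is the
# validation tolerance and 12 the ONNX opset.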
| 302 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
_lowerCAmelCase = ""
_lowerCAmelCase = ""
_lowerCAmelCase = ""
_lowerCAmelCase = 1 # (0 is vertical, 1 is horizontal)
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : int = get_dataset(__lowerCAmelCase , __lowerCAmelCase )
print('Processing...' )
_lowerCAmelCase : Any = update_image_and_anno(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for index, image in enumerate(__lowerCAmelCase ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
_lowerCAmelCase : Tuple = random_chars(32 )
_lowerCAmelCase : int = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0]
_lowerCAmelCase : int = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , __lowerCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f"""Success {index+1}/{len(__lowerCAmelCase )} with {file_name}""" )
_lowerCAmelCase : Optional[int] = []
for anno in new_annos[index]:
_lowerCAmelCase : List[Any] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(__lowerCAmelCase )
with open(f"""/{file_root}.txt""" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : int = []
for label_file in glob.glob(os.path.join(__lowerCAmelCase , '*.txt' ) ):
_lowerCAmelCase : Any = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__lowerCAmelCase ) as in_file:
_lowerCAmelCase : Tuple = in_file.readlines()
_lowerCAmelCase : str = os.path.join(__lowerCAmelCase , f"""{label_name}.jpg""" )
_lowerCAmelCase : Union[str, Any] = []
for obj_list in obj_lists:
_lowerCAmelCase : Any = obj_list.rstrip('\n' ).split(' ' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(__lowerCAmelCase )
labels.append(__lowerCAmelCase )
return img_paths, labels
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = 1 ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Tuple = []
for idx in range(len(__lowerCAmelCase ) ):
_lowerCAmelCase : str = []
_lowerCAmelCase : int = img_list[idx]
path_list.append(__lowerCAmelCase )
_lowerCAmelCase : Any = anno_list[idx]
_lowerCAmelCase : Optional[int] = cva.imread(__lowerCAmelCase )
if flip_type == 1:
_lowerCAmelCase : Optional[int] = cva.flip(__lowerCAmelCase , __lowerCAmelCase )
for bbox in img_annos:
_lowerCAmelCase : Union[str, Any] = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
_lowerCAmelCase : Union[str, Any] = cva.flip(__lowerCAmelCase , __lowerCAmelCase )
for bbox in img_annos:
_lowerCAmelCase : Union[str, Any] = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(__lowerCAmelCase )
new_imgs_list.append(__lowerCAmelCase )
return new_imgs_list, new_annos_lists, path_list
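# Worked example (illustrative, not from the original file): with
# flip_type == 1 (horizontal) a YOLO box [class, x_c, y_c, w, h] =
# [0, 0.25, 0.5, 0.2, 0.3] becomes [0, 0.75, 0.5, 0.2, 0.3]; only the
# x-centre is mirrored (x_c -> 1 - x_c). With flip_type == 0 (vertical) the
# y-centre is mirrored instead, and width/height are never changed.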
def lowerCamelCase__ ( _lowerCamelCase = 32 ):
'''simple docstring'''
    assert number_char > 1, "The number of characters should be greater than 1"
_lowerCAmelCase : List[str] = ascii_lowercase + digits
return "".join(random.choice(__lowerCAmelCase ) for _ in range(__lowerCAmelCase ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 712 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-ctx_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": 5_1_2,
"""facebook/dpr-question_encoder-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": 5_1_2,
"""facebook/dpr-reader-multiset-base""": 5_1_2,
}
_lowerCAmelCase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
_lowerCAmelCase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
_lowerCAmelCase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
_lowerCAmelCase = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(a__ )
class __UpperCamelCase :
def __call__( self ,_A ,_A = None ,_A = None ,_A = False ,_A = False ,_A = None ,_A = None ,_A = None ,**_A ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
elif titles is None or texts is None:
_lowerCAmelCase : Optional[int] = titles if texts is None else texts
return super().__call__(
_A ,_A ,padding=_A ,truncation=_A ,max_length=_A ,return_tensors=_A ,return_attention_mask=_A ,**_A ,)
_lowerCAmelCase : str = titles if not isinstance(_A ,_A ) else [titles]
_lowerCAmelCase : List[str] = texts if not isinstance(_A ,_A ) else [texts]
_lowerCAmelCase : Union[str, Any] = len(_A )
_lowerCAmelCase : Optional[Any] = questions if not isinstance(_A ,_A ) else [questions] * n_passages
if len(_A ) != len(_A ):
raise ValueError(
F"""There should be as many titles than texts but got {len(_A )} titles and {len(_A )} texts.""" )
_lowerCAmelCase : Union[str, Any] = super().__call__(_A ,_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Tuple = super().__call__(_A ,add_special_tokens=_A ,padding=_A ,truncation=_A )['input_ids']
_lowerCAmelCase : Optional[int] = {
'input_ids': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_A ,_A )
]
}
if return_attention_mask is not False:
_lowerCAmelCase : Tuple = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_lowerCAmelCase : List[Any] = attention_mask
return self.pad(_A ,padding=_A ,max_length=_A ,return_tensors=_A )
def __lowerCamelCase ( self ,_A ,_A ,_A = 16 ,_A = 64 ,_A = 4 ,):
'''simple docstring'''
_lowerCAmelCase : int = reader_input['input_ids']
_lowerCAmelCase, _lowerCAmelCase, _lowerCAmelCase : int = reader_output[:3]
_lowerCAmelCase : Optional[Any] = len(_A )
_lowerCAmelCase : Any = sorted(range(_A ) ,reverse=_A ,key=relevance_logits.__getitem__ )
_lowerCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_lowerCAmelCase : int = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_lowerCAmelCase : Any = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_lowerCAmelCase : List[str] = sequence_ids.index(self.pad_token_id )
else:
_lowerCAmelCase : Optional[int] = len(_A )
_lowerCAmelCase : Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_A ,top_spans=_A ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_A ,start_index=_A ,end_index=_A ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCamelCase ( self ,_A ,_A ,_A ,_A ,):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
for start_index, start_score in enumerate(_A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_lowerCAmelCase : Tuple = sorted(_A ,key=lambda _A : x[1] ,reverse=_A )
_lowerCAmelCase : int = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" )
_lowerCAmelCase : List[str] = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a__ )
class __UpperCamelCase ( a__ , a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase = ["input_ids", "attention_mask"]
| 16 | 0 |
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
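# Hypothetical invocation of this script (script name and paths are
# placeholders, not taken from this file):
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#       --dump_path ./converted-pipeline --extract_ema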
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'''--original_config_file''',
default=None,
type=str,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--scheduler_type''',
default='''pndm''',
type=str,
help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''',
)
parser.add_argument(
'''--pipeline_type''',
default=None,
type=str,
help=(
'''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''''
'''. If `None` pipeline will be automatically inferred.'''
),
)
parser.add_argument(
'''--image_size''',
default=None,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--prediction_type''',
default=None,
type=str,
help=(
'''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'''
''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
parser.add_argument(
'''--stable_unclip''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''',
)
parser.add_argument(
'''--stable_unclip_prior''',
type=str,
default=None,
required=False,
help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''',
)
parser.add_argument(
'''--clip_stats_path''',
type=str,
help='''Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''',
required=False,
)
parser.add_argument(
'''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.'''
)
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
'''--vae_path''',
type=str,
default=None,
required=False,
help='''Set to a path, hub id to an already converted vae to not convert it again.''',
)
lowerCAmelCase_ = parser.parse_args()
lowerCAmelCase_ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 39 |
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return torch.atana(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / math.pi * 2
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.sin(t * math.pi / 2 ) ** 2
snake_case_ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
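# Illustrative check of the schedule above (an assumption: `atana` is
# torch.atan2(sigma, alpha), the usual k-diffusion convention). At t = 0,
# sigma = sin(0)^2 = 0 and alpha = 1, so atan2(0, 1) / pi * 2 = 0; at t = 1,
# sigma = 1 and alpha = 0, so atan2(1, 0) / pi * 2 = 1. The crash schedule
# therefore fixes both endpoints of [0, 1] and only bends the interior.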
class snake_case_ ( __A ):
'''simple docstring'''
pass
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : int ) ->Optional[int]:
super().__init__()
snake_case_ = DiffusionAttnUnetaD(_UpperCamelCase , n_attn_layers=4 )
snake_case_ = deepcopy(self.diffusion )
snake_case_ = torch.quasirandom.SobolEngine(1 , scramble=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = MODELS_MAP[model_name]['''url''']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
if name.startswith('''skip''' ):
return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
# name has to be of format main.{digit}
if not name.startswith('''main.''' ):
raise ValueError(F'''ResConvBlock error with {name}''' )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
for key, value in ATTN_MAP.items():
if name.startswith(SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif name.startswith(SCREAMING_SNAKE_CASE__ ):
return [name.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for v in value]
raise ValueError(F'''Attn error with {name}''' )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=13 ):
snake_case_ = input_string
if string.split('''.''' )[0] == "timestep_embed":
return string.replace('''timestep_embed''' , '''time_proj''' )
snake_case_ = 0
if string.startswith('''net.3.''' ):
depth += 1
snake_case_ = string[6:]
elif string.startswith('''net.''' ):
snake_case_ = string[4:]
while string.startswith('''main.7.''' ):
depth += 1
snake_case_ = string[7:]
if string.startswith('''main.''' ):
snake_case_ = string[5:]
# mid block
if string[:2].isdigit():
snake_case_ = string[:2]
snake_case_ = string[2:]
else:
snake_case_ = string[0]
snake_case_ = string[1:]
if depth == max_depth:
snake_case_ = MID_NUM_TO_LAYER[layer_num]
snake_case_ = '''mid_block'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) < 7:
snake_case_ = DOWN_NUM_TO_LAYER[layer_num]
snake_case_ = F'''down_blocks.{depth}'''
elif depth > 0 and int(SCREAMING_SNAKE_CASE__ ) > 7:
snake_case_ = UP_NUM_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - depth - 1}'''
elif depth == 0:
snake_case_ = DEPTH_0_TO_LAYER[layer_num]
snake_case_ = F'''up_blocks.{max_depth - 1}''' if int(SCREAMING_SNAKE_CASE__ ) > 3 else '''down_blocks.0'''
if not string_left.startswith('''.''' ):
raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' )
snake_case_ = string_left[1:]
if "resnets" in new_layer:
snake_case_ = convert_resconv_naming(SCREAMING_SNAKE_CASE__ )
elif "attentions" in new_layer:
snake_case_ = convert_attn_naming(SCREAMING_SNAKE_CASE__ )
snake_case_ = new_string_left
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = prefix + '''.''' + new_layer + '''.''' + string_left
else:
snake_case_ = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
return new_string
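# Illustrative rename taken from the first branch of the function above (not
# an exhaustive trace): "timestep_embed.weight" -> "time_proj.weight".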
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = {}
for k, v in state_dict.items():
if k.endswith('''kernel''' ):
            # up- and downsample layers don't have trainable weights
continue
snake_case_ = rename(SCREAMING_SNAKE_CASE__ )
# check if we need to transform from Conv => Linear for attention
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = transform_conv_attns(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
snake_case_ = v
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if len(SCREAMING_SNAKE_CASE__ ) == 1:
if len(v.shape ) == 3:
# weight
snake_case_ = v[:, :, 0]
else:
# bias
snake_case_ = v
else:
# qkv matrices
snake_case_ = v.shape[0]
snake_case_ = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
snake_case_ = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
snake_case_ = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
snake_case_ = args.model_path.split('''/''' )[-1].split('''.''' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}'''
snake_case_ = download(SCREAMING_SNAKE_CASE__ )
snake_case_ = MODELS_MAP[model_name]['''sample_rate''']
snake_case_ = MODELS_MAP[model_name]['''sample_size''']
snake_case_ = Object()
snake_case_ = sample_size
snake_case_ = sample_rate
snake_case_ = 0
snake_case_ = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ , sample_rate=SCREAMING_SNAKE_CASE__ )
snake_case_ = diffusers_model.state_dict()
snake_case_ = DiffusionUncond(SCREAMING_SNAKE_CASE__ )
orig_model.load_state_dict(torch.load(args.model_path , map_location=SCREAMING_SNAKE_CASE__ )['''state_dict'''] )
snake_case_ = orig_model.diffusion_ema.eval()
snake_case_ = orig_model.state_dict()
snake_case_ = rename_orig_weights(SCREAMING_SNAKE_CASE__ )
snake_case_ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
snake_case_ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(SCREAMING_SNAKE_CASE__ ) == 0, F'''Problem with {renamed_minus_diffusers}'''
assert all(k.endswith('''kernel''' ) for k in list(SCREAMING_SNAKE_CASE__ ) ), F'''Problem with {diffusers_minus_renamed}'''
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'''
if key == "time_proj.weight":
snake_case_ = value.squeeze()
snake_case_ = value
diffusers_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
snake_case_ = 100
snake_case_ = 33
snake_case_ = IPNDMScheduler(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.randn([1, 2, config.sample_size] , generator=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.linspace(1 , 0 , steps + 1 , device=SCREAMING_SNAKE_CASE__ )[:-1]
snake_case_ = get_crash_schedule(SCREAMING_SNAKE_CASE__ )
snake_case_ = DanceDiffusionPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
snake_case_ = torch.manual_seed(33 )
snake_case_ = pipe(num_inference_steps=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).audios
snake_case_ = sampling.iplms_sample(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , {} )
snake_case_ = generated.clamp(-1 , 1 )
snake_case_ = (generated - audio).abs().sum()
snake_case_ = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('''Diff sum''' , SCREAMING_SNAKE_CASE__ )
print('''Diff max''' , SCREAMING_SNAKE_CASE__ )
assert diff_max < 1E-3, F'''Diff max: {diff_max} is too much :-/'''
print(F'''Conversion for {model_name} successful!''' )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase_ = parser.parse_args()
    main(args)
| 39 | 1 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCAmelCase ( a_ , a_ ) -> Optional[int]:
"""simple docstring"""
A_ : Optional[int] = old_name
if "patch_embed" in old_name:
A_ : Optional[Any] = old_name.split(""".""" )
if layer == "0":
A_ : str = old_name.replace("""0""" , """convolution1""" )
elif layer == "1":
A_ : Optional[int] = old_name.replace("""1""" , """batchnorm_before""" )
elif layer == "3":
A_ : List[str] = old_name.replace("""3""" , """convolution2""" )
else:
A_ : Union[str, Any] = old_name.replace("""4""" , """batchnorm_after""" )
if "network" in old_name and re.search(R"""\d\.\d""" , a_ ):
A_ : Optional[Any] = R"""\b\d{2}\b"""
if bool(re.search(a_ , a_ ) ):
A_ : Tuple = re.search(R"""\d\.\d\d.""" , a_ ).group()
else:
A_ : List[str] = re.search(R"""\d\.\d.""" , a_ ).group()
if int(match[0] ) < 6:
A_ : Union[str, Any] = old_name.replace(a_ , """""" )
A_ : Optional[int] = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] )
A_ : Dict = """intermediate_stages.""" + trimmed_name
else:
A_ : Dict = old_name.replace(a_ , """""" )
if int(match[2] ) < num_meta4D_last_stage:
A_ : List[Any] = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] )
else:
A_ : str = str(int(match[2] ) - num_meta4D_last_stage )
A_ : Tuple = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index )
if "norm1" in old_name:
A_ : List[Any] = trimmed_name.replace("""norm1""" , """layernorm1""" )
elif "norm2" in old_name:
A_ : Any = trimmed_name.replace("""norm2""" , """layernorm2""" )
elif "fc1" in old_name:
A_ : int = trimmed_name.replace("""fc1""" , """linear_in""" )
elif "fc2" in old_name:
A_ : int = trimmed_name.replace("""fc2""" , """linear_out""" )
A_ : Union[str, Any] = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(R""".\d.""" , a_ ):
A_ : Optional[int] = old_name.replace("""network""" , """intermediate_stages""" )
if "fc" in new_name:
A_ : Union[str, Any] = new_name.replace("""fc""" , """convolution""" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A_ : Optional[int] = new_name.replace("""norm1""" , """batchnorm_before""" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A_ : Dict = new_name.replace("""norm2""" , """batchnorm_after""" )
if "proj" in new_name:
A_ : int = new_name.replace("""proj""" , """projection""" )
if "dist_head" in new_name:
A_ : Optional[Any] = new_name.replace("""dist_head""" , """distillation_classifier""" )
elif "head" in new_name:
A_ : List[str] = new_name.replace("""head""" , """classifier""" )
elif "patch_embed" in new_name:
A_ : Dict = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A_ : Dict = new_name.replace("""norm""" , """layernorm""" )
A_ : Any = """efficientformer.""" + new_name
else:
A_ : Tuple = """efficientformer.encoder.""" + new_name
return new_name
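# Illustrative rename trace inferred from the branches above (an assumption,
# not taken verbatim from the original file): "patch_embed.0.weight" first
# becomes "patch_embed.convolution1.weight" and is then prefixed to
# "efficientformer.patch_embed.convolution1.weight".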
def UpperCAmelCase ( a_ , a_ ) -> Union[str, Any]:
"""simple docstring"""
for key in checkpoint.copy().keys():
A_ : Optional[int] = checkpoint.pop(a_ )
A_ : Dict = val
return checkpoint
def UpperCAmelCase ( ) -> Any:
"""simple docstring"""
A_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Union[str, Any] = Image.open(requests.get(a_ , stream=a_ ).raw )
return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])
    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)
    model.load_state_dict(new_state_dict)
    model.eval()
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size}, crop_size={"height": crop_size, "width": crop_size}, resample=pillow_resamplings["bicubic"], )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values
    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ] )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    assert torch.allclose(original_pixel_values, pixel_values)
    outputs = model(pixel_values)
    logits = outputs.logits
    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7" )
    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")
    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True, )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 713 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")
        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")
        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming).
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")
        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({"old": old_item, "new": new_item})
    return mapping
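# Sketch of how the {"old": ..., "new": ...} pairs built above are consumed:
# in the simplest case (no attention splitting, no extra replacements) each
# pair just moves one tensor. This is a simplified stand-in for the tail of
# assign_to_checkpoint below.
def apply_mapping_simple(mapping, old_checkpoint, new_checkpoint):
    for pair in mapping:
        new_checkpoint[pair["new"]] = old_checkpoint[pair["old"]]
    return new_checkpoint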
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)
    for path in paths:
        new_path = path["new"]
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]
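# Self-contained sketch of the fused-QKV split above for a 1-D bias tensor
# (an illustrative addition): the checkpoint stores each head's q, k and v
# contiguously, so the tensor is viewed per head and then cut into three
# equal chunks along the per-head axis.
def split_fused_qkv_bias(qkv_bias, num_heads):
    channels = qkv_bias.shape[0] // 3
    per_head = qkv_bias.reshape(num_heads, 3 * channels // num_heads)
    query, key, value = per_head.split(channels // num_heads, dim=1)
    return query.reshape(-1), key.reshape(-1), value.reshape(-1)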
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]
    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]
    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)
        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config )
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config )
    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None, config=config, )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
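# Note on the indexing used above (illustrative): with `num_res_blocks`
# resnets plus one resampler per stage, a flat block index i maps to
#     block_id          = i // (num_res_blocks + 1)
#     layer_in_block_id = i %  (num_res_blocks + 1)
# e.g. num_res_blocks = 2 puts flat index 5 at stage 1, layer 2.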
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNetaDModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 385 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
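# Compact restatement of the slicing above (an illustrative sketch): timm
# fuses q/k/v into one (3*hidden, hidden) matrix ordered [query; key; value];
# HF keeps three separate projections, so the fused tensor is cut into
# consecutive row blocks.
def slice_fused_qkv(in_proj_weight, in_proj_bias, hidden_size):
    query = (in_proj_weight[:hidden_size, :], in_proj_bias[:hidden_size])
    key = (in_proj_weight[hidden_size : hidden_size * 2, :], in_proj_bias[hidden_size : hidden_size * 2])
    value = (in_proj_weight[-hidden_size:, :], in_proj_bias[-hidden_size:])
    return query, key, value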
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 376 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '▁'
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = self.tokenizer_class(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "[MASK]" )
        self.assertEqual(len(vocab_keys ) , 1004 )
    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_full_tokenizer( self ):
        tokenizer = BigBirdTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
@cached_property
    def big_tokenizer( self ):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [65, 18536, 2260, 101, 66]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
# fmt: off
UpperCAmelCase__ = [65, 8_71, 4_19, 3_58, 9_46, 9_91, 25_21, 4_52, 3_58, 13_57, 3_87, 77_51, 35_36, 1_12, 9_85, 4_56, 1_26, 8_65, 9_38, 54_00, 57_34, 4_58, 13_68, 4_67, 7_86, 24_62, 52_46, 11_59, 6_33, 8_65, 45_19, 4_57, 5_82, 8_52, 25_57, 4_27, 9_16, 5_08, 4_05, 3_43_24, 4_97, 3_91, 4_08, 1_13_42, 12_44, 3_85, 1_00, 9_38, 9_85, 4_56, 5_74, 3_62, 1_25_97, 32_00, 31_29, 11_72, 66] # noqa: E231
# fmt: on
self.assertListEqual(_UpperCAmelCase , self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" , return_token_type_ids=False )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=False )
        config = BigBirdConfig(attention_type="original_full" )
        model = BigBirdModel(config )
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_special_tokens( self ):
        tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
        decoded_text = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
UpperCAmelCase__ = {"""input_ids""": [[65, 3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14, 66], [65, 4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
| 603 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json',
'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , **kwargs , ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang: str ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors: str , src_lang: Optional[str] , tgt_lang: Optional[str] , **extra_kwargs ):
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts: List[str] , src_lang: str = "en_XX" , tgt_texts: Optional[List[str]] = None , tgt_lang: str = "ro_RO" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts , tgt_texts , **kwargs )
    def _switch_to_input_mode( self ):
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ):
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        """Reset the special tokens to the source lang setting. No prefix; suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
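    # Illustration (added note): for src_lang "en_XX" the template above renders
    # single sequences as "$A </s> en_XX" and pairs as "$A $B </s> en_XX",
    # since prefix_tokens is empty and suffix_tokens is [</s>, lang_code].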
    def set_tgt_lang_special_tokens( self , lang: str ) -> None:
        """Reset the special tokens to the target lang setting. No prefix; suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory.' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
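# Illustrative usage (a sketch; the checkpoint name is the one referenced in
# PRETRAINED_VOCAB_FILES_MAP above):
#     tokenizer = MBartTokenizerFast.from_pretrained(
#         "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # batch.input_ids ends with [..., </s>, en_XX] per set_src_lang_special_tokens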
| 706 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
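# Minimal sketch of the dotted-path traversal used above (an illustrative
# addition): getattr is applied segment by segment, so a key like
# "encoder.layers.0.attention" resolves nested modules (including numeric
# ModuleList children) before the final tensor is assigned.
def resolve_attr(obj, dotted_path):
    for segment in dotted_path.split("."):
        obj = getattr(obj, segment)
    return obj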
def recursively_load_weights_wavaveca(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split('adaptor.')[-1]
    items = name.split('.')
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
                adapter.proj_layer_norm.bias.data = value
                logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
                adapter.proj.bias.data = value
                logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
                adapter.proj.weight.data = value
                logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
    elif isinstance(layer_id, int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
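# Why the helper above works (an explanatory sketch): an embedding matrix of
# shape (vocab_size, emb_size) can serve directly as the weight of a bias-free
# linear LM head mapping emb_size -> vocab_size, tying the output projection
# to the input embeddings. Assigning `weight.data` replaces the layer's
# weight outright, so the constructor dimensions matter less than the shape
# of the copied matrix.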
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ):
    # load configs (`use_auth_token=True` follows the upstream script's defaults)
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim , )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            'config_yaml': config_yaml_path,
            'data': '/'.join(dict_path.split('/')[:-1] ),
            'w2v_path': checkpoint_path,
            'load_pretrained_decoder_from': None,
        } , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
    logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'mbart50'
    config['feature_extractor_type'] = 'wav2vec2'
    config['decoder_start_token_id'] = tokenizer.eos_token_id
    config['forced_bos_token_id'] = 250004
    config['forced_eos_token_id'] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_yaml_path', default=None, type=str, help='Path to yaml file of fine-tuned model')
parser.add_argument(
'--encoder_config_path',
default='facebook/wav2vec2-xls-r-1b',
type=str,
help='Path to hf encoder wav2vec2 checkpoint config',
)
parser.add_argument(
'--decoder_config_path',
default='facebook/mbart-large-50-one-to-many-mmt',
type=str,
help='Path to hf decoder checkpoint config',
)
parser.add_argument('--add_adapter', default=True, type=bool, help='whethere to add model adapter layers')
parser.add_argument('--adapter_stride', default=2, type=int, help='stride of adapter layers')
parser.add_argument('--adapter_kernel_size', default=3, type=int, help='kernel size of adapter layers')
parser.add_argument('--encoder_output_dim', default=1024, type=int, help='encoder output dim')
parser.add_argument('--start_token_id', default=25_0004, type=int, help='`decoder_start_token_id` of model config')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 429 | 0 |
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
__SCREAMING_SNAKE_CASE = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
__SCREAMING_SNAKE_CASE = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'sources': datasets.Value('string' , id='sequence' ),
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[
                'https://github.com/Unbabel/COMET',
                'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
                'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
            ] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores, mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
        return {"mean_score": mean_score, "scores": scores}
| 357 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 16_384,
}
class LEDTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Optional[Any] , snake_case__ : str=None , snake_case__ : List[Any]=None , snake_case__ : Dict=None , snake_case__ : List[str]="replace" , snake_case__ : Optional[int]="<s>" , snake_case__ : List[str]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Dict="<s>" , snake_case__ : Tuple="<unk>" , snake_case__ : Any="<pad>" , snake_case__ : Dict="<mask>" , snake_case__ : int=False , snake_case__ : Optional[int]=True , **snake_case__ : List[Any] , ):
'''simple docstring'''
super().__init__(
snake_case__ , snake_case__ , tokenizer_file=snake_case__ , errors=snake_case__ , bos_token=snake_case__ , eos_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , add_prefix_space=snake_case__ , trim_offsets=snake_case__ , **snake_case__ , )
UpperCAmelCase__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
UpperCAmelCase__ : Dict = getattr(snake_case__ , pre_tok_state.pop("type" ) )
UpperCAmelCase__ : str = add_prefix_space
UpperCAmelCase__ : Any = pre_tok_class(**snake_case__ )
UpperCAmelCase__ : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : List[str] = "post_processor"
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , snake_case__ , snake_case__ )
if tokenizer_component_instance:
UpperCAmelCase__ : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Optional[Any] = tuple(state["sep"] )
if "cls" in state:
UpperCAmelCase__ : Any = tuple(state["cls"] )
UpperCAmelCase__ : Any = False
if state.get("add_prefix_space" , snake_case__ ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : List[Any] = True
if state.get("trim_offsets" , snake_case__ ) != trim_offsets:
UpperCAmelCase__ : Optional[int] = trim_offsets
UpperCAmelCase__ : List[Any] = True
if changes_to_apply:
UpperCAmelCase__ : List[str] = getattr(snake_case__ , state.pop("type" ) )
UpperCAmelCase__ : Optional[int] = component_class(**snake_case__ )
setattr(self.backend_tokenizer , snake_case__ , snake_case__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def __a ( self : Any ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __a ( self : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = AddedToken(snake_case__ , lstrip=snake_case__ , rstrip=snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else value
UpperCAmelCase__ : Dict = value
def __a ( self : str , *snake_case__ : Any , **snake_case__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.get("is_split_into_words" , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*snake_case__ , **snake_case__ )
def __a ( self : List[str] , *snake_case__ : Union[str, Any] , **snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = kwargs.get("is_split_into_words" , snake_case__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs." )
return super()._encode_plus(*snake_case__ , **snake_case__ )
def __a ( self : Tuple , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self._tokenizer.model.save(snake_case__ , name=snake_case__ )
return tuple(snake_case__ )
def __a ( self : str , snake_case__ : List[Any] , snake_case__ : str=None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self : str , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __a ( self : Any , snake_case__ : Union[Dict[str, EncodedInput], BatchEncoding] , snake_case__ : Optional[int] = None , snake_case__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , ):
'''simple docstring'''
UpperCAmelCase__ : str = super()._pad(
encoded_inputs=snake_case__ , max_length=snake_case__ , padding_strategy=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase__ : Optional[int] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase__ : List[Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase__ : Any = len(encoded_inputs["global_attention_mask"] ) != len(snake_case__ )
if needs_to_be_padded:
UpperCAmelCase__ : List[str] = len(snake_case__ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase__ : Dict = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase__ : Dict = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
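# --- Hedged usage sketch (not part of the original file); it assumes the
# "allenai/led-base-16384" checkpoint listed above is reachable:
#
#   tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#   enc = tokenizer("A long document ...", padding="max_length", max_length=32)
#   enc["global_attention_mask"] = [1] + [0] * 31  # global attention on <s>
#   enc = tokenizer.pad(enc, padding="max_length", max_length=64)  # `_pad` above fills with -1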
| 438 | 0 |
import numpy as np
class Cell:
    """
    A cell in the grid world. `position` is an (x, y) tuple, `parent` is the
    cell visited before this one, and `g`, `h`, `f` are the A* cost terms.
    """

    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        # Compare cells by position so membership tests behave as expected.
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    """
    The external world: a `world_size[0]` x `world_size[1]` numpy grid.
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        """
        Return the (up to 8) in-bounds neighbours of `cell`.
        """
        neighbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neighbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours


def astar(world, start, goal):
    """
    A* search from `start` to `goal` in `world`; returns the path as a list of positions.
    """
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # skip neighbours that were already expanded
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2  # squared euclidean distance heuristic
            n.f = n.h + n.g
            # skip if an equal-or-better candidate for this cell is already queued
            if any(c == n and c.f <= n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]


if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"path from {start.position} to {goal.position}")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
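# --- Illustrative addition (not in the original file): a Manhattan-distance heuristic,
# a common alternative to the squared-euclidean one used inside `astar` above.
def manhattan_heuristic(cell: Cell, goal: Cell) -> int:
    x1, y1 = cell.position
    x2, y2 = goal.position
    return abs(x2 - x1) + abs(y2 - y1)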
| 717 |
from math import sqrt
def is_prime(number):
    """Returns True if 'number' is prime, otherwise False."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Sieve of Eratosthenes: returns all primes from 2 up to n."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.

    # actual sieve of eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """Returns all primes between 2 and n by trial division."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Returns the prime factorization of 'number' as a list of prime factors."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Returns the greatest prime factor of 'number'."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and >= 0"

    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Returns the smallest prime factor of 'number'."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and >= 0"

    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare must been from type bool"
    return number % 2 == 0


def is_odd(number):
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare must been from type bool"
    return number % 2 != 0


def goldbach(number):
    """Goldbach's assumption: returns two primes whose sum is the even input."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1, number2):
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1, number2):
    """Least common multiple ("kgV") built from the prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """Returns the n-th prime number (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1, p_number_2):
    """Returns all primes strictly between the two prime arguments."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """Returns all divisors of n (inclusive 1 and n)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """A number is perfect if it equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Reduces a fraction to lowest terms and returns it as a tuple."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Returns n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """Returns the n-th Fibonacci number."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
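# --- Short usage sketch (not part of the original module); it exercises a few of the
# helpers defined above.
if __name__ == "__main__":
    print(is_prime(97))              # True
    print(sieve_er(30))              # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    print(prime_factorization(360))  # [2, 2, 2, 3, 3, 5]
    print(gcd(48, 36), kg_v(4, 6))   # 12 12
    print(goldbach(28))              # [5, 23]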
| 671 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 21 |
'''simple docstring'''
import argparse
from omegaconf import OmegaConf

import torch

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
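# --- Hypothetical invocation (paths are placeholders, not from the original script):
#   python convert_ldm_original.py \
#       --checkpoint_path ldm.ckpt \
#       --config_path ldm_config.yaml \
#       --output_path ./ldm_pipeline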
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
a_ = parser.parse_args()
convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path) | 296 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    r"""
    Constructs a basic image processor that resizes, center-crops, rescales and
    normalizes a batch of images.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
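# --- Minimal usage sketch (illustrative; the class name above was restored from
# obfuscated source, and the 256 -> 224 resize/crop defaults come from the file itself).
if __name__ == "__main__":
    dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
    processor = SimpleImageProcessor()
    batch = processor.preprocess(dummy, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)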
| 709 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('>=', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
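# --- Hedged usage sketch (not from the original file). Inside an `accelerate` training
# script these helpers are driven by the Accelerator's FSDP plugin; `accelerator`,
# `model` and `optimizer` are assumed to already exist:
#
#   fsdp_plugin = accelerator.state.fsdp_plugin
#   save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")
#   load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt")
#   load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt")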
| 409 | 0 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*_inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
| 395 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 395 | 1 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, "ImagePipelineOutput"]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        # the extra string is a marker used by tests that load this local pipeline
        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
| 162 |
'''simple docstring'''
from math import pi, sqrt
def gamma(num: float) -> float:
    """Calculates the value of the gamma function for integer and half-integer inputs."""
    if num <= 0:
        raise ValueError("math domain error")

    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
while num:
_lowercase = float(input("""Gamma of: """))
print(F"""gamma({num}) = {gamma(num)}""")
print("""\nEnter 0 to exit...""")
| 162 | 1 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
lowerCAmelCase = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)
    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
        md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
        localized_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)
        link_changed_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
        link_unchanged_md_list = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        converted_md_list_sample = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
| 462 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {"""configuration_sew""": ["""SEW_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SEWConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 93 | 0 |
from string import ascii_lowercase, ascii_uppercase
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str) -> str:
'''simple docstring'''
if not sentence:
return ""
__UpperCamelCase : Union[str, Any] = dict(zip(_lowerCamelCase , _lowerCamelCase))
return lower_to_upper.get(sentence[0] , sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod() | 94 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Tuple = BertConfig.from_json_file(_lowerCamelCase)
print(F'Building PyTorch model from configuration: {config}')
__UpperCamelCase : List[Any] = BertForPreTraining(_lowerCamelCase)
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}')
torch.save(model.state_dict() , _lowerCamelCase)
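# --- Hypothetical invocation (paths are placeholders, not from the original script):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path bert_model.ckpt \
#       --bert_config_file bert_config.json \
#       --pytorch_dump_path pytorch_model.bin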
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowercase : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 94 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(pixel_values=A ,pixel_mask=A )
UpperCAmelCase__ : Tuple = model(A )
comm_check_on_output(A )
UpperCAmelCase__ : Optional[Any] = model(
pixel_values=A ,pixel_mask=A ,mask_labels=A ,class_labels=A )
comm_check_on_output(A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class __lowercase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = MaskaFormerModelTester(self )
UpperCAmelCase__ : Union[str, Any] = ConfigTester(self ,config_class=A ,has_text_modality=A )
def __lowercase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A ,**A ,output_hidden_states=A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def __lowercase ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def __lowercase ( self : str ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(A )
UpperCAmelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[str] = [*signature.parameters.keys()]
UpperCAmelCase__ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,A )
@slow
def __lowercase ( self : List[Any] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase__ : Union[str, Any] = MaskaFormerModel.from_pretrained(A )
self.assertIsNotNone(A )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = (self.model_tester.min_size,) * 2
UpperCAmelCase__ : Dict = {
"""pixel_values""": torch.randn((2, 3, *size) ,device=A ),
"""mask_labels""": torch.randn((2, 10, *size) ,device=A ),
"""class_labels""": torch.zeros(2 ,10 ,device=A ).long(),
}
UpperCAmelCase__ : List[Any] = self.model_tester.get_config()
UpperCAmelCase__ : Union[str, Any] = MaskaFormerForUniversalSegmentation(A ).to(A )
UpperCAmelCase__ : List[str] = model(**A )
self.assertTrue(outputs.loss is not None )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A ,**A ,output_hidden_states=A )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[int] = model_class(A ).to(A )
UpperCAmelCase__ : str = model(**A ,output_attentions=A )
self.assertTrue(outputs.attentions is not None )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = self.all_model_classes[1]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Union[str, Any] = model_class(A )
model.to(A )
model.train()
UpperCAmelCase__ : Optional[Any] = model(A ,mask_labels=A ,class_labels=A ).loss
loss.backward()
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.all_model_classes[1]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : int = model_class(A ).to(A )
model.train()
UpperCAmelCase__ : Tuple = model(A ,mask_labels=A ,class_labels=A )
UpperCAmelCase__ : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase__ : Tuple = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase__ : int = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase__ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__UpperCAmelCase = 1E-4
def lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __lowercase ( unittest.TestCase ):
@cached_property
def __lowercase ( self : str ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __lowercase ( self : str ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A )
UpperCAmelCase__ : str = self.default_image_processor
UpperCAmelCase__ : List[Any] = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(A ,return_tensors="""pt""" ).to(A )
UpperCAmelCase__ : Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A ,(1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase__ : int = model(**A )
UpperCAmelCase__ : List[Any] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A ,atol=A ) )
UpperCAmelCase__ : Union[str, Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A ,atol=A ) )
UpperCAmelCase__ : int = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A ,atol=A ) )
def __lowercase ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A ).eval()
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : List[str] = prepare_img()
UpperCAmelCase__ : List[str] = image_processor(A ,return_tensors="""pt""" ).to(A )
UpperCAmelCase__ : Optional[int] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A ,(1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = model(**A )
# masks_queries_logits
UpperCAmelCase__ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase__ : Optional[Any] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCAmelCase__ : Dict = torch.tensor(A ).to(A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A ,atol=A ) )
# class_queries_logits
UpperCAmelCase__ : Any = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase__ : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A ,atol=A ) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A ).eval()
UpperCAmelCase__ : str = self.default_image_processor
UpperCAmelCase__ : Any = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,)
UpperCAmelCase__ : Tuple = inputs["""pixel_values"""].to(A )
UpperCAmelCase__ : List[Any] = [el.to(A ) for el in inputs["""mask_labels"""]]
UpperCAmelCase__ : Union[str, Any] = [el.to(A ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCAmelCase__ : Optional[int] = model(**A )
self.assertTrue(outputs.loss is not None )
| 65 |
from __future__ import annotations
from PIL import Image
# Define glider example
__a : Tuple = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__a : Optional[Any] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]] ) -> list[list[int]]:
    """Compute the next Game of Life generation for a 2D grid of 0/1 cells."""
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
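# Worked example with the BLINKER pattern defined above: a vertical line of
# three live cells flips to a horizontal line after one step, so the pattern
# oscillates with period two.
#
#   new_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]])
#   # -> [[0, 0, 0], [1, 1, 1], [0, 0, 0]]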
def generate_images(cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    """Render ``frames`` successive generations of ``cells`` as greyscale PIL images."""
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new("""RGB""" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
__a : Optional[int] = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 637 | 0 |
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__UpperCamelCase : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCamelCase : int = """ def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
"""
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
__a = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
__a = self.transformer_dir
shutil.copy(
os.path.join(lowerCamelCase , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
__a = 'src/transformers'
shutil.rmtree(self.transformer_dir )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ) ->List[Any]:
'''simple docstring'''
__a = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
__a = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
__a = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
__a = black.format_str(lowerCamelCase , mode=lowerCamelCase )
__a = os.path.join(self.transformer_dir , 'new_code.py' )
with open(lowerCamelCase , 'w' , newline='\n' ) as f:
f.write(lowerCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase )
with open(lowerCamelCase , 'r' ) as f:
self.assertTrue(f.read() , lowerCamelCase )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
self.assertEqual(lowerCamelCase , lowerCamelCase )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
# Base copy consistency
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
# With no empty line at the end
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , lowerCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , lowerCamelCase ) , )
# Copy consistency with a really long name
__a = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
self.check_copy_consistency(
F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub('Bert' , lowerCamelCase , lowerCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , lowerCamelCase , overwrite_result=re.sub('Bert' , 'TestModel' , lowerCamelCase ) , )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = check_copies.LOCALIZED_READMES['README_zh-hans.md']
__a = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
' Luong, Quoc V. Le, Christopher D. Manning.'
)
__a = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
__a = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
' method has been applied to compress GPT2 into'
' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
' Multilingual BERT into'
' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
' Christopher D. Manning 发布。\n'
)
__a , __a = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme['format_model_list'] )
self.assertFalse(lowerCamelCase )
self.assertEqual(lowerCamelCase , lowerCamelCase )
__a , __a = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme['format_model_list'] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(lowerCamelCase )
__a = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
)
__a = (
'1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
__a = (
'1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
)
__a , __a = check_copies.convert_to_localized_md(
lowerCamelCase , lowerCamelCase , localized_readme['format_model_list'] )
# Check if the model link is synchronized.
self.assertEqual(lowerCamelCase , lowerCamelCase ) | 270 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Tuple = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 270 | 1 |
'''simple docstring'''
def prime_sieve_eratosthenes(num: int) -> list[int]:
    '''Return all prime numbers up to ``num`` using the sieve of Eratosthenes.'''
    if num <= 0:
        raise ValueError("""Input must be a positive integer""" )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1 ) if primes[prime]]
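# Worked examples (outputs follow directly from the sieve above):
#
#   prime_sieve_eratosthenes(10)  # -> [2, 3, 5, 7]
#   prime_sieve_eratosthenes(2)   # -> [2]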
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase__ :int = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
| 150 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ :List[str] = logging.get_logger(__name__)
UpperCAmelCase__ :Union[str, Any] = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
snake_case__ : str = 'altclip_text_model'
def __init__( self : List[Any] , A__ : Optional[int]=250002 , A__ : Any=1024 , A__ : List[Any]=24 , A__ : Dict=16 , A__ : Union[str, Any]=4096 , A__ : Union[str, Any]="gelu" , A__ : str=0.1 , A__ : int=0.1 , A__ : str=514 , A__ : Optional[int]=1 , A__ : Optional[Any]=0.02 , A__ : int=0.02 , A__ : Optional[Any]=1e-0_5 , A__ : int=1 , A__ : Optional[Any]=0 , A__ : Dict=2 , A__ : Optional[int]="absolute" , A__ : Optional[int]=True , A__ : List[str]=768 , **A__ : List[str] , ):
"""simple docstring"""
super().__init__(pad_token_id=A__ , bos_token_id=A__ , eos_token_id=A__ , **A__ )
__lowerCamelCase : List[str] = vocab_size
__lowerCamelCase : Optional[Any] = hidden_size
__lowerCamelCase : List[str] = num_hidden_layers
__lowerCamelCase : Tuple = num_attention_heads
__lowerCamelCase : List[Any] = hidden_act
__lowerCamelCase : Union[str, Any] = intermediate_size
__lowerCamelCase : Dict = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : List[str] = max_position_embeddings
__lowerCamelCase : Optional[Any] = type_vocab_size
__lowerCamelCase : int = initializer_range
__lowerCamelCase : Optional[int] = initializer_factor
__lowerCamelCase : List[Any] = layer_norm_eps
__lowerCamelCase : List[str] = position_embedding_type
__lowerCamelCase : str = use_cache
__lowerCamelCase : Optional[Any] = project_dim
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
snake_case__ : List[Any] = 'altclip_vision_model'
def __init__( self : Optional[int] , A__ : str=768 , A__ : str=3072 , A__ : str=512 , A__ : Optional[int]=12 , A__ : List[Any]=12 , A__ : Union[str, Any]=3 , A__ : Dict=224 , A__ : List[Any]=32 , A__ : List[Any]="quick_gelu" , A__ : Dict=1e-5 , A__ : List[str]=0.0 , A__ : Dict=0.02 , A__ : List[str]=1.0 , **A__ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**A__ )
__lowerCamelCase : Optional[int] = hidden_size
__lowerCamelCase : Optional[int] = intermediate_size
__lowerCamelCase : Optional[Any] = projection_dim
__lowerCamelCase : Union[str, Any] = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : str = num_channels
__lowerCamelCase : Any = patch_size
__lowerCamelCase : Any = image_size
__lowerCamelCase : Any = initializer_range
__lowerCamelCase : List[str] = initializer_factor
__lowerCamelCase : List[str] = attention_dropout
__lowerCamelCase : Any = layer_norm_eps
__lowerCamelCase : Any = hidden_act
@classmethod
def a_ ( cls : str , A__ : Union[str, os.PathLike] , **A__ : List[str] ):
"""simple docstring"""
cls._set_token_in_kwargs(A__ )
__lowerCamelCase , __lowerCamelCase : str = cls.get_config_dict(A__ , **A__ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
__lowerCamelCase : Optional[Any] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(A__ , **A__ )
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
snake_case__ : int = 'altclip'
snake_case__ : Dict = True
def __init__( self : Optional[Any] , A__ : Optional[Any]=None , A__ : Union[str, Any]=None , A__ : Union[str, Any]=768 , A__ : Tuple=2.6592 , **A__ : List[Any] ):
"""simple docstring"""
__lowerCamelCase : str = kwargs.pop("""text_config_dict""" , A__ )
__lowerCamelCase : Dict = kwargs.pop("""vision_config_dict""" , A__ )
super().__init__(**A__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
__lowerCamelCase : Any = {}
# This is the complete result when using `text_config_dict`.
__lowerCamelCase : Tuple = AltCLIPTextConfig(**A__ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
__lowerCamelCase : Optional[Any] = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__lowerCamelCase : int = (
f"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
f"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(A__ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
__lowerCamelCase : Dict = {}
# This is the complete result when using `vision_config_dict`.
__lowerCamelCase : List[str] = AltCLIPVisionConfig(**A__ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
__lowerCamelCase : str = {
str(A__ ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
__lowerCamelCase : List[Any] = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
__lowerCamelCase : Optional[Any] = (
f"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
f"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(A__ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
__lowerCamelCase : List[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
__lowerCamelCase : List[str] = {}
            logger.info("""`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.""" )
__lowerCamelCase : Union[str, Any] = AltCLIPTextConfig(**A__ )
__lowerCamelCase : Optional[int] = AltCLIPVisionConfig(**A__ )
__lowerCamelCase : Optional[Any] = projection_dim
__lowerCamelCase : List[str] = logit_scale_init_value
__lowerCamelCase : Union[str, Any] = 1.0
@classmethod
def a_ ( cls : Optional[Any] , A__ : AltCLIPTextConfig , A__ : AltCLIPVisionConfig , **A__ : List[str] ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A__ )
def a_ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
__lowerCamelCase : Optional[Any] = self.text_config.to_dict()
__lowerCamelCase : Tuple = self.vision_config.to_dict()
__lowerCamelCase : Tuple = self.__class__.model_type
return output
| 150 | 1 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 468 |
"""simple docstring"""
def binary_insertion_sort(collection: list ):
    '''Sort ``collection`` in place, locating each insertion point by binary search.'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
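# Worked examples (binary search finds each insertion index, then elements
# are shifted one slot to the right to make room):
#
#   binary_insertion_sort([5, 2, 4, 1])  # -> [1, 2, 4, 5]
#   binary_insertion_sort([])            # -> []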
if __name__ == "__main__":
lowercase = input('''Enter numbers separated by a comma:\n''').strip()
lowercase = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
| 468 | 1 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
__a : Dict = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
__a : Optional[Any] = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
__a : str = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase( datasets.Metric ):
"""simple docstring"""
def __a ( self ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
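    # Hedged worked example of the min(precision, recall) arithmetic described
    # in the description string above (the sentences and counts are
    # illustrative, not taken from this metric's doctests):
    #   hypothesis "the cat sat" vs. reference "the cat sat down"
    #   matching 1..4-grams:   3 + 2 + 1 + 0 = 6
    #   hypothesis 1..4-grams: 3 + 2 + 1 + 0 = 6   -> precision = 6/6  = 1.0
    #   reference 1..4-grams:  4 + 3 + 2 + 1 = 10  -> recall    = 6/10 = 0.6
    #   GLEU = min(1.0, 0.6) = 0.6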
def __a ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 1 , lowerCamelCase = 4 , ) -> Dict[str, float]:
"""simple docstring"""
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=lowerCamelCase , hypotheses=lowerCamelCase , min_len=lowerCamelCase , max_len=lowerCamelCase )
} | 397 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase , 'width_multiplier' ) )
class __SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=64 , lowerCamelCase=2 , lowerCamelCase=3 , lowerCamelCase="swish" , lowerCamelCase=3 , lowerCamelCase=32 , lowerCamelCase=0.1 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=10 , lowerCamelCase=None , lowerCamelCase=0.25 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , ) ->List[str]:
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->str:
'''simple docstring'''
__a = MobileViTVaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Any:
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
__a =(
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__a =(
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__a =False
__a =False
__a =False
__a =False
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase )
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds' )
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings' )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='MobileViTV2 does not output attentions' )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' )
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
pass
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(lowerCamelCase )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
__a = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(lowerCamelCase ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __UpperCamelCase ( self ) ->Optional[int]:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase )
@slow
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
__a = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ) ->int:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' )
if is_vision_available()
else None
)
@slow
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
__a = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to(
lowerCamelCase )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors='pt' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
# verify the logits
__a = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
__a = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
__a = model.to(lowerCamelCase )
__a = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors='pt' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
__a = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
__a = model.to(lowerCamelCase )
__a = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' )
__a = prepare_img()
__a = image_processor(images=lowerCamelCase , return_tensors='pt' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
__a = model(**lowerCamelCase )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase )
__a = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase ) | 448 | 0 |
from typing import List, Union

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING


logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # no extra pipeline parameters: empty kwargs for preprocess, forward, and postprocess
        return {}, {}, {}
    def preprocess(self, image):
        image = load_image(image)
        # remember the original (width, height) so the depth map can be resized back later
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # interpolate the raw depth map back to the original image resolution
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        # rescale to 0-255 and convert to a PIL image for easy visualization
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        return {"predicted_depth": predicted_depth, "depth": depth}
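
# A minimal usage sketch for this pipeline. Assumptions: the class is registered under
# the "depth-estimation" task alias and "Intel/dpt-large" is a compatible checkpoint.
#
#     from transformers import pipeline
#
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#     result["depth"].save("depth.png")   # PIL image, depth rescaled to 0-255
#     raw = result["predicted_depth"]     # unscaled depth tensor from the model
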
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self):
        # serialize the nested vision config alongside the text config attributes
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
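
# A minimal usage sketch. Assumption: GitConfig and GitVisionConfig are exported from
# the top-level `transformers` package.
#
#     from transformers import GitConfig, GitVisionConfig
#
#     config = GitConfig()  # default text decoder + vision tower
#
#     # customize the vision tower by passing a plain dict
#     vision_config = GitVisionConfig(patch_size=32).to_dict()
#     config = GitConfig(vision_config=vision_config, num_hidden_layers=12)
#     assert config.to_dict()["vision_config"]["patch_size"] == 32
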